diff options
358 files changed, 22672 insertions, 11943 deletions
diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 98c577e..eb1ca03 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.12.1 +current_version = 1.13.0 commit = True tag = True diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b41b835..2e27a96 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -89,7 +89,7 @@ jobs: run: | python3 -m venv venv . venv/bin/activate - pip install pip==21.1 + pip install pip pip install poetry poetry install python -c "import sys; print(sys.version)" diff --git a/.github/workflows/test-binary-build.yaml b/.github/workflows/test-binary-build.yaml index 8d0a5fe..11e720a 100644 --- a/.github/workflows/test-binary-build.yaml +++ b/.github/workflows/test-binary-build.yaml @@ -36,7 +36,7 @@ jobs: run: | python3 -m venv venv . venv/bin/activate - pip install pip==21.1 + pip install pip pip install poetry poetry install python -c "import sys; print(sys.version)" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 114c3b7..01a9dfd 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -11,8 +11,8 @@ jobs: name: Pytest strategy: matrix: - os: [ubuntu-latest] - python: ['3.6', '3.7', '3.8', '3.9', '3.10'] + os: ["ubuntu-20.04"] + python: ["3.7", "3.8", "3.9", "3.10", "3.11.1"] redis: [5, 6, 7] runs-on: ${{ matrix.os }} @@ -25,21 +25,23 @@ jobs: steps: - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - architecture: 'x64' + architecture: "x64" - name: Cache venv uses: actions/cache@v2 with: path: venv # Look to see if there is a cache hit for the corresponding requirements file - key: poetryenv-${{ matrix.os }}-${{ matrix.python }}-${{ hashFiles('poetry.lock') }} + key: + poetryenv-${{ matrix.os }}-${{ matrix.python }}-${{ + hashFiles('poetry.lock') }} - name: Install Dependencies run: | python3 
-m venv venv . venv/bin/activate - pip install -U pip==21.1 setuptools + pip install -U pip setuptools pip install poetry poetry install python -c "import sys; print(sys.version)" @@ -58,12 +60,12 @@ jobs: - uses: actions/checkout@v2 - uses: codespell-project/actions-codespell@master with: - ignore_words_list: fo,ists,oll,optin,ot,smove,tre,whe - exclude_file: docs/assets/demo.svg - - uses: actions/setup-python@v1 + ignore_words_list: fo,ists,oll,optin,ot,smove,tre,whe,EXAT,exat + skip: ./docs/assets/demo.svg,./iredis/data/commands.json,./iredis/data/commands/*,./tests/unittests/* + - uses: actions/setup-python@v4 with: python-version: 3.7 - architecture: 'x64' + architecture: "x64" - name: Cache venv uses: actions/cache@v2 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index f190414..49329ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ ## UPCOMING +## 1.3 + +- Dependency: Drop Python 3.6 support. +- Bugfix: fix some typos. + +### 1.12.2 + +- Feature: IRedis now honors the `ssl_cert_reqs` strategy, either specifying it via + command line (`--verify-ssl=<none|optional|required>`) or as an url parameter (`ssl_cert_reqs`) + when the connection is secured via tls (`rediss://`). (authored by [torrefatto]) + +### 1.12.1 + - Feature: support new command: `HRANDFIELD`. - Bugfix: all tests pass on redis:7 now. - Feature: IRedis now accept `username` for auth, redis server version under 6 @@ -294,3 +307,4 @@ [sid-maddy]: https://github.com/sid-maddy [tssujt]: https://github.com/tssujt [aymericbeaumet]: https://github.com/aymericbeaumet +[torrefatto]: https://github.com/torrefatto @@ -126,7 +126,7 @@ supports similar options like redis-cli, like `-h` for redis-server's host and ``` $ iredis --help -Usage: [OPTIONS] [CMD]... +Usage: iredis [OPTIONS] [CMD]... IRedis: Interactive Redis @@ -143,39 +143,60 @@ Usage: [OPTIONS] [CMD]... settings. Options: - -h TEXT Server hostname (default: 127.0.0.1). - -p TEXT Server port (default: 6379). 
- -s, --socket TEXT Server socket (overrides hostname and port). - -n TEXT Database number.(overwrites dsn/url's db number) - -a, --password TEXT Password to use when connecting to the server. - --url TEXT Use Redis URL to indicate connection(Can set with - env `IREDIS_URL`), Example: - redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 - unix://[[username]:[password]]@/path/to/socket.soc - k?db=0 - - -d, --dsn TEXT Use DSN configured into the [alias_dsn] section of - iredisrc file. (Can set with env `IREDIS_DSN`) - - --newbie / --no-newbie Show command hints and useful helps. - --iredisrc TEXT Config file for iredis, default is ~/.iredisrc. - --decode TEXT decode response, default is No decode, which will - output all bytes literals. - - --client_name TEXT Assign a name to the current connection. - --raw / --no-raw Use raw formatting for replies (default when - STDOUT is not a tty). However, you can use --no- - raw to force formatted output even when STDOUT is - not a tty. - - --rainbow / --no-rainbow Display colorful prompt. - --shell / --no-shell Allow to run shell commands, default to True. - --pager / --no-pager Using pager when output is too tall for your - window, default to True. - - --version Show the version and exit. - --help Show this message and exit. + -h TEXT Server hostname (default: 127.0.0.1). + -p TEXT Server port (default: 6379). + -s, --socket TEXT Server socket (overrides hostname and port). + -n INTEGER Database number.(overwrites dsn/url's db + number) + + -u, --username TEXT User name used to auth, will be ignore for + redis version < 6. + + -a, --password TEXT Password to use when connecting to the + server. 
+ + --url TEXT Use Redis URL to indicate connection(Can set + with env `IREDIS_URL`), Example: redis:/ + /[[username]:[password]]@localhost:6379/0 + rediss://[[username]:[password]]@localhost:6 + 379/0 unix://[[username]:[password]]@/pa + th/to/socket.sock?db=0 + + -d, --dsn TEXT Use DSN configured into the [alias_dsn] + section of iredisrc file. (Can set with env + `IREDIS_DSN`) + + --newbie / --no-newbie Show command hints and useful helps. + --iredisrc TEXT Config file for iredis, default is + ~/.iredisrc. + + --decode TEXT decode response, default is No decode, which + will output all bytes literals. + + --client_name TEXT Assign a name to the current connection. + --raw / --no-raw Use raw formatting for replies (default when + STDOUT is not a tty). However, you can use + --no-raw to force formatted output even when + STDOUT is not a tty. + + --rainbow / --no-rainbow Display colorful prompt. + --shell / --no-shell Allow to run shell commands, default to + True. + + --pager / --no-pager Using pager when output is too tall for your + window, default to True. + + --verify-ssl [none|optional|required] + Set the TLS certificate verification + strategy + + --prompt TEXT Prompt format (supported interpolations: + {client_name}, {db}, {host}, {path}, {port}, + {username}, {client_addr}, {client_id}). + + --version Show the version and exit. + --help Show this message and exit. + ``` ### Using DSN @@ -215,6 +236,13 @@ interpolations: - `{client_addr}` - `{client_id}` +The `--prompt` utilize +[Python String format engine](https://docs.python.org/3/library/string.html#formatstrings), +so as long as it is a valid string formatter, it will work( anything that +`"<your prompt>".format(...)` accepts). For example, you can limit your Redis +server host name's length to 5 by setting `--prompt` to +`iredis --prompt '{host:.5s}'`. + ### Configuration IRedis supports config files. 
Command-line options will always take precedence diff --git a/iredis/__init__.py b/iredis/__init__.py index 438a38d..9a34ccc 100644 --- a/iredis/__init__.py +++ b/iredis/__init__.py @@ -1 +1 @@ -__version__ = "1.12.1" +__version__ = "1.13.0" diff --git a/iredis/client.py b/iredis/client.py index 6555ff1..bbdb87c 100644 --- a/iredis/client.py +++ b/iredis/client.py @@ -20,7 +20,6 @@ from redis.exceptions import ( ResponseError, ) - from . import markdown, renders from .data import commands as commands_data from .commands import ( @@ -64,6 +63,7 @@ class Client: username=None, client_name=None, prompt=None, + verify_ssl=None, ): self.host = host self.port = port @@ -81,6 +81,8 @@ class Client: if prompt: self.prompt = prompt + self.verify_ssl = verify_ssl or "required" + self.client_id = None self.client_addr = None @@ -125,6 +127,7 @@ class Client: self.path, self.scheme, self.username, + self.verify_ssl, client_name=self.client_name, ) @@ -137,6 +140,7 @@ class Client: path=None, scheme="redis", username=None, + verify_ssl=None, client_name=None, ): if scheme in ("redis", "rediss"): @@ -154,6 +158,7 @@ class Client: connection_kwargs["username"] = username if scheme == "rediss": + connection_kwargs["ssl_cert_reqs"] = verify_ssl connection_class = SSLConnection else: connection_class = Connection @@ -286,6 +291,9 @@ class Client: last_error = e retry_times -= 1 need_refresh_connection = True + except redis.exceptions.ExecAbortError: + config.transaction = False + raise except ResponseError as e: response_message = str(e) if response_message.startswith("MOVED"): @@ -294,9 +302,6 @@ class Client: ) raise e - except redis.exceptions.ExecAbortError: - config.transaction = False - raise except KeyboardInterrupt: logger.warning("received KeyboardInterrupt... 
rebuild connection...") connection.disconnect() @@ -308,7 +313,8 @@ class Client: return None else: return response - raise last_error + if last_error: + raise last_error def reissue_with_redirect(self, response, *args, **kwargs): """ @@ -318,13 +324,21 @@ class Client: This feature is not supported for unix socket connection. """ # Redis Cluster only supports database zero. - _, slot, ip_port = response.split(" ") + _, _, ip_port = response.split(" ") ip, port = ip_port.split(":") port = int(port) print(response, file=sys.stderr) - connection = self.create_connection(ip, port) + connection = self.create_connection( + ip, + port, + username=self.username, + password=self.password, + path=self.path, + scheme=self.scheme, + client_name=self.client_name, + ) # if user sets dsn for dest node # use username and password from dsn settings if config.alias_dsn: diff --git a/iredis/config.py b/iredis/config.py index a62101c..ea27827 100644 --- a/iredis/config.py +++ b/iredis/config.py @@ -8,7 +8,6 @@ from . import data as project_data # TODO verbose logger to print to stdout logger = logging.getLogger(__name__) - system_config_file = "/etc/iredisrc" pwd_config_file = os.path.join(os.getcwd(), ".iredisrc") @@ -40,6 +39,7 @@ class Config: self.shell = None self.enable_pager = None self.pager = None + self.verify_ssl = None self.warning = True diff --git a/iredis/data/commands.json b/iredis/data/commands.json index 74093cc..978e128 100644 --- a/iredis/data/commands.json +++ b/iredis/data/commands.json @@ -1,5659 +1,17102 @@ { - "ACL LOAD": { - "summary": "Reload the ACLs from the configured ACL file", - "complexity": "O(N). Where N is the number of configured users.", - "since": "6.0.0", - "group": "server" - }, - "ACL SAVE": { - "summary": "Save the current ACL rules in the configured ACL file", - "complexity": "O(N). 
Where N is the number of configured users.", - "since": "6.0.0", - "group": "server" - }, - "ACL LIST": { - "summary": "List the current ACL rules in ACL config file format", - "complexity": "O(N). Where N is the number of configured users.", - "since": "6.0.0", - "group": "server" - }, - "ACL USERS": { - "summary": "List the username of all the configured ACL rules", - "complexity": "O(N). Where N is the number of configured users.", - "since": "6.0.0", - "group": "server" - }, - "ACL GETUSER": { - "summary": "Get the rules for a specific ACL user", - "complexity": "O(N). Where N is the number of password, command and pattern rules that the user has.", - "arguments": [ - { - "name": "username", - "type": "string" - } - ], - "since": "6.0.0", - "group": "server" - }, - "ACL SETUSER": { - "summary": "Modify or create the rules for a specific ACL user", - "complexity": "O(N). Where N is the number of rules provided.", - "arguments": [ - { - "name": "username", - "type": "string" - }, - { - "name": "rule", - "type": "string", - "multiple": true, - "optional": true - } - ], - "since": "6.0.0", - "group": "server" - }, - "ACL DELUSER": { - "summary": "Remove the specified ACL users and the associated rules", - "complexity": "O(1) amortized time considering the typical user.", - "arguments": [ - { - "name": "username", - "type": "string", - "multiple": true - } - ], - "since": "6.0.0", - "group": "server" - }, - "ACL CAT": { - "summary": "List the ACL categories or the commands inside a category", - "complexity": "O(1) since the categories and commands are a fixed set.", - "arguments": [ - { - "name": "categoryname", - "type": "string", - "optional": true - } - ], - "since": "6.0.0", - "group": "server" - }, - "ACL GENPASS": { - "summary": "Generate a pseudorandom secure password to use for ACL users", - "complexity": "O(1)", - "arguments": [ - { - "name": "bits", - "type": "integer", - "optional": true - } - ], - "since": "6.0.0", - "group": "server" - }, - "ACL 
WHOAMI": { - "summary": "Return the name of the user associated to the current connection", - "complexity": "O(1)", - "since": "6.0.0", - "group": "server" - }, - "ACL LOG": { - "summary": "List latest events denied because of ACLs in place", - "complexity": "O(N) with N being the number of entries shown.", - "arguments": [ - { - "name": "count or RESET", - "type": "string", - "optional": true - } - ], - "since": "6.0.0", - "group": "server" - }, - "ACL HELP": { - "summary": "Show helpful text about the different subcommands", - "complexity": "O(1)", - "since": "6.0.0", - "group": "server" - }, - "APPEND": { - "summary": "Append a value to a key", - "complexity": "O(1). The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "2.0.0", - "group": "string" - }, - "AUTH": { - "summary": "Authenticate to the server", - "arguments": [ - { - "name": "username", - "type": "string", - "optional": true - }, - { - "name": "password", - "type": "string" - } - ], - "since": "1.0.0", - "group": "connection" - }, - "BGREWRITEAOF": { - "summary": "Asynchronously rewrite the append-only file", - "since": "1.0.0", - "group": "server" - }, - "BGSAVE": { - "summary": "Asynchronously save the dataset to disk", - "arguments": [ - { - "name": "schedule", - "type": "enum", - "enum": [ - "SCHEDULE" - ], - "optional": true - } - ], - "since": "1.0.0", - "group": "server" - }, - "BITCOUNT": { - "summary": "Count set bits in a string", - "complexity": "O(N)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": [ - "start", - "end" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - } - ], - "since": "2.6.0", - "group": "bitmap" - }, - "BITFIELD": { - "summary": 
"Perform arbitrary bitfield integer operations on strings", - "complexity": "O(1) for each subcommand specified", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "command": "GET", - "name": [ - "type", - "offset" - ], - "type": [ - "type", - "integer" - ], - "optional": true - }, - { - "command": "SET", - "name": [ - "type", - "offset", - "value" - ], - "type": [ - "type", - "integer", - "integer" - ], - "optional": true - }, - { - "command": "INCRBY", - "name": [ - "type", - "offset", - "increment" - ], - "type": [ - "type", - "integer", - "integer" - ], - "optional": true - }, - { - "command": "OVERFLOW", - "type": "enum", - "enum": [ - "WRAP", - "SAT", - "FAIL" - ], - "optional": true - } - ], - "since": "3.2.0", - "group": "bitmap" - }, - "BITOP": { - "summary": "Perform bitwise operations between strings", - "complexity": "O(N)", - "arguments": [ - { - "name": "operation", - "type": "string" - }, - { - "name": "destkey", - "type": "key" - }, - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "2.6.0", - "group": "bitmap" - }, - "BITPOS": { - "summary": "Find first bit set or clear in a string", - "complexity": "O(N)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "bit", - "type": "integer" - }, - { - "name": "index", - "type": "block", - "optional": true, - "block": [ - { - "name": "start", - "type": "integer" - }, - { - "name": "end", - "type": "integer", - "optional": true - } - ] - } - ], - "since": "2.8.7", - "group": "bitmap" - }, - "BLPOP": { - "summary": "Remove and get the first element in a list, or block until one is available", - "complexity": "O(N) where N is the number of provided keys.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "timeout", - "type": "double" - } - ], - "since": "2.0.0", - "group": "list" - }, - "BRPOP": { - "summary": "Remove and get the last element in a list, or block until one is available", - "complexity": 
"O(N) where N is the number of provided keys.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "timeout", - "type": "double" - } - ], - "since": "2.0.0", - "group": "list" - }, - "BRPOPLPUSH": { - "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", - "complexity": "O(1)", - "arguments": [ - { - "name": "source", - "type": "key" - }, - { - "name": "destination", - "type": "key" - }, - { - "name": "timeout", - "type": "double" - } - ], - "since": "2.2.0", - "group": "list" - }, - "BLMOVE": { - "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", - "complexity": "O(1)", - "arguments": [ - { - "name": "source", - "type": "key" - }, - { - "name": "destination", - "type": "key" - }, - { - "name": "wherefrom", - "type": "enum", - "enum": [ - "LEFT", - "RIGHT" - ] - }, - { - "name": "whereto", - "type": "enum", - "enum": [ - "LEFT", - "RIGHT" - ] - }, - { - "name": "timeout", - "type": "double" - } - ], - "since": "6.2.0", - "group": "list" - }, - "BZPOPMIN": { - "summary": "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available", - "complexity": "O(log(N)) with N being the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "timeout", - "type": "double" - } - ], - "since": "5.0.0", - "group": "sorted_set" - }, - "BZPOPMAX": { - "summary": "Remove and return the member with the highest score from one or more sorted sets, or block until one is available", - "complexity": "O(log(N)) with N being the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "timeout", - "type": "double" - } - ], - "since": "5.0.0", - "group": "sorted_set" - }, - "CLIENT CACHING": { - "summary": "Instruct the server 
about tracking or not keys in the next request", - "complexity": "O(1)", - "arguments": [ - { - "name": "mode", - "type": "enum", - "enum": [ - "YES", - "NO" - ] - } - ], - "since": "6.0.0", - "group": "connection" - }, - "CLIENT ID": { - "summary": "Returns the client ID for the current connection", - "complexity": "O(1)", - "since": "5.0.0", - "group": "connection" - }, - "CLIENT INFO": { - "summary": "Returns information about the current client connection.", - "complexity": "O(1)", - "since": "6.2.0", - "group": "connection" - }, - "CLIENT KILL": { - "summary": "Kill the connection of a client", - "complexity": "O(N) where N is the number of client connections", - "arguments": [ - { - "name": "ip:port", - "type": "string", - "optional": true - }, - { - "command": "ID", - "name": "client-id", - "type": "integer", - "optional": true - }, - { - "command": "TYPE", - "type": "enum", - "enum": [ - "normal", - "master", - "slave", - "pubsub" - ], - "optional": true - }, - { - "command": "USER", - "name": "username", - "type": "string", - "optional": true - }, - { - "command": "ADDR", - "name": "ip:port", - "type": "string", - "optional": true - }, - { - "command": "LADDR", - "name": "ip:port", - "type": "string", - "optional": true - }, - { - "command": "SKIPME", - "name": "yes/no", - "type": "string", - "optional": true - } - ], - "since": "2.4.0", - "group": "connection" - }, - "CLIENT LIST": { - "summary": "Get the list of client connections", - "complexity": "O(N) where N is the number of client connections", - "arguments": [ - { - "command": "TYPE", - "type": "enum", - "enum": [ - "normal", - "master", - "replica", - "pubsub" - ], - "optional": true - }, - { - "name": "id", - "type": "block", - "block": [ - { - "command": "ID" - }, - { - "name": "client-id", - "type": "integer", - "multiple": true - } - ], - "optional": true - } - ], - "since": "2.4.0", - "group": "connection" - }, - "CLIENT GETNAME": { - "summary": "Get the current connection name", - 
"complexity": "O(1)", - "since": "2.6.9", - "group": "connection" - }, - "CLIENT GETREDIR": { - "summary": "Get tracking notifications redirection client ID if any", - "complexity": "O(1)", - "since": "6.0.0", - "group": "connection" - }, - "CLIENT UNPAUSE": { - "summary": "Resume processing of clients that were paused", - "complexity": "O(N) Where N is the number of paused clients", - "since": "6.2.0", - "group": "connection" - }, - "CLIENT PAUSE": { - "summary": "Stop processing commands from clients for some time", - "complexity": "O(1)", - "arguments": [ - { - "name": "timeout", - "type": "integer" - }, - { - "name": "mode", - "type": "enum", - "optional": true, - "enum": [ - "WRITE", - "ALL" - ] - } - ], - "since": "2.9.50", - "group": "connection" - }, - "CLIENT REPLY": { - "summary": "Instruct the server whether to reply to commands", - "complexity": "O(1)", - "arguments": [ - { - "name": "reply-mode", - "type": "enum", - "enum": [ - "ON", - "OFF", - "SKIP" - ] - } - ], - "since": "3.2.0", - "group": "connection" - }, - "CLIENT SETNAME": { - "summary": "Set the current connection name", - "complexity": "O(1)", - "since": "2.6.9", - "arguments": [ - { - "name": "connection-name", - "type": "string" - } - ], - "group": "connection" - }, - "CLIENT TRACKING": { - "summary": "Enable or disable server assisted client side caching support", - "complexity": "O(1). 
Some options may introduce additional complexity.", - "arguments": [ - { - "name": "status", - "type": "enum", - "enum": [ - "ON", - "OFF" - ] - }, - { - "command": "REDIRECT", - "name": "client-id", - "type": "integer", - "optional": true - }, - { - "command": "PREFIX", - "name": "prefix", - "type": "string", - "optional": true, - "multiple": true - }, - { - "name": "BCAST", - "type": "enum", - "enum": [ - "BCAST" - ], - "optional": true - }, - { - "name": "OPTIN", - "type": "enum", - "enum": [ - "OPTIN" - ], - "optional": true - }, - { - "name": "OPTOUT", - "type": "enum", - "enum": [ - "OPTOUT" - ], - "optional": true - }, - { - "name": "NOLOOP", - "type": "enum", - "enum": [ - "NOLOOP" - ], - "optional": true - } - ], - "since": "6.0.0", - "group": "connection" - }, - "CLIENT TRACKINGINFO": { - "summary": "Return information about server assisted client side caching for the current connection", - "complexity": "O(1)", - "since": "6.2.0", - "group": "connection" - }, - "CLIENT UNBLOCK": { - "summary": "Unblock a client blocked in a blocking command from a different connection", - "complexity": "O(log N) where N is the number of client connections", - "arguments": [ - { - "name": "client-id", - "type": "integer" - }, - { - "name": "unblock-type", - "type": "enum", - "enum": [ - "TIMEOUT", - "ERROR" - ], - "optional": true - } - ], - "since": "5.0.0", - "group": "connection" - }, - "CLUSTER ADDSLOTS": { - "summary": "Assign new hash slots to receiving node", - "complexity": "O(N) where N is the total number of hash slot arguments", - "arguments": [ - { - "name": "slot", - "type": "integer", - "multiple": true - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER BUMPEPOCH": { - "summary": "Advance the cluster config epoch", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER COUNT-FAILURE-REPORTS": { - "summary": "Return the number of failure reports active for a given node", - "complexity": "O(N) where N is the number of 
failure reports", - "arguments": [ - { - "name": "node-id", - "type": "string" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER COUNTKEYSINSLOT": { - "summary": "Return the number of local keys in the specified hash slot", - "complexity": "O(1)", - "arguments": [ - { - "name": "slot", - "type": "integer" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER DELSLOTS": { - "summary": "Set hash slots as unbound in receiving node", - "complexity": "O(N) where N is the total number of hash slot arguments", - "arguments": [ - { - "name": "slot", - "type": "integer", - "multiple": true - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER FAILOVER": { - "summary": "Forces a replica to perform a manual failover of its master.", - "complexity": "O(1)", - "arguments": [ - { - "name": "options", - "type": "enum", - "enum": [ - "FORCE", - "TAKEOVER" - ], - "optional": true - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER FLUSHSLOTS": { - "summary": "Delete a node's own slots information", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER FORGET": { - "summary": "Remove a node from the nodes table", - "complexity": "O(1)", - "arguments": [ - { - "name": "node-id", - "type": "string" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER GETKEYSINSLOT": { - "summary": "Return local key names in the specified hash slot", - "complexity": "O(log(N)) where N is the number of requested keys", - "arguments": [ - { - "name": "slot", - "type": "integer" - }, - { - "name": "count", - "type": "integer" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER INFO": { - "summary": "Provides info about Redis Cluster node state", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER KEYSLOT": { - "summary": "Returns the hash slot of the specified key", - "complexity": "O(N) where N is the number of bytes in the key", - "arguments": [ - { - "name": "key", - 
"type": "string" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER MEET": { - "summary": "Force a node cluster to handshake with another node", - "complexity": "O(1)", - "arguments": [ - { - "name": "ip", - "type": "string" - }, - { - "name": "port", - "type": "integer" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER MYID": { - "summary": "Return the node id", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER NODES": { - "summary": "Get Cluster config for the node", - "complexity": "O(N) where N is the total number of Cluster nodes", - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER REPLICATE": { - "summary": "Reconfigure a node as a replica of the specified master node", - "complexity": "O(1)", - "arguments": [ - { - "name": "node-id", - "type": "string" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER RESET": { - "summary": "Reset a Redis Cluster node", - "complexity": "O(N) where N is the number of known nodes. 
The command may execute a FLUSHALL as a side effect.", - "arguments": [ - { - "name": "reset-type", - "type": "enum", - "enum": [ - "HARD", - "SOFT" - ], - "optional": true - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER SAVECONFIG": { - "summary": "Forces the node to save cluster state on disk", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER SET-CONFIG-EPOCH": { - "summary": "Set the configuration epoch in a new node", - "complexity": "O(1)", - "arguments": [ - { - "name": "config-epoch", - "type": "integer" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER SETSLOT": { - "summary": "Bind a hash slot to a specific node", - "complexity": "O(1)", - "arguments": [ - { - "name": "slot", - "type": "integer" - }, - { - "name": "subcommand", - "type": "enum", - "enum": [ - "IMPORTING", - "MIGRATING", - "STABLE", - "NODE" - ] - }, - { - "name": "node-id", - "type": "string", - "optional": true - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER SLAVES": { - "summary": "List replica nodes of the specified master node", - "complexity": "O(1)", - "arguments": [ - { - "name": "node-id", - "type": "string" - } - ], - "since": "3.0.0", - "group": "cluster" - }, - "CLUSTER REPLICAS": { - "summary": "List replica nodes of the specified master node", - "complexity": "O(1)", - "arguments": [ - { - "name": "node-id", - "type": "string" - } - ], - "since": "5.0.0", - "group": "cluster" - }, - "CLUSTER SLOTS": { - "summary": "Get array of Cluster slot to node mappings", - "complexity": "O(N) where N is the total number of Cluster nodes", - "since": "3.0.0", - "group": "cluster" - }, - "COMMAND": { - "summary": "Get array of Redis command details", - "complexity": "O(N) where N is the total number of Redis commands", - "since": "2.8.13", - "group": "server" - }, - "COMMAND COUNT": { - "summary": "Get total number of Redis commands", - "complexity": "O(1)", - "since": "2.8.13", - "group": "server" - }, - 
"COMMAND GETKEYS": { - "summary": "Extract keys given a full Redis command", - "complexity": "O(N) where N is the number of arguments to the command", - "since": "2.8.13", - "group": "server" - }, - "COMMAND INFO": { - "summary": "Get array of specific Redis command details", - "complexity": "O(N) when N is number of commands to look up", - "since": "2.8.13", - "arguments": [ - { - "name": "command-name", - "type": "string", - "multiple": true - } - ], - "group": "server" - }, - "CONFIG GET": { - "summary": "Get the value of a configuration parameter", - "arguments": [ - { - "name": "parameter", - "type": "string" - } - ], - "since": "2.0.0", - "group": "server" - }, - "CONFIG REWRITE": { - "summary": "Rewrite the configuration file with the in memory configuration", - "since": "2.8.0", - "group": "server" - }, - "CONFIG SET": { - "summary": "Set a configuration parameter to the given value", - "arguments": [ - { - "name": "parameter", - "type": "string" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "2.0.0", - "group": "server" - }, - "CONFIG RESETSTAT": { - "summary": "Reset the stats returned by INFO", - "complexity": "O(1)", - "since": "2.0.0", - "group": "server" - }, - "COPY": { - "summary": "Copy a key", - "complexity": "O(N) worst case for collections, where N is the number of nested items. 
O(1) for string values.", - "since": "6.2.0", - "arguments": [ - { - "name": "source", - "type": "key" - }, - { - "name": "destination", - "type": "key" - }, - { - "command": "DB", - "name": "destination-db", - "type": "integer", - "optional": true - }, - { - "name": "replace", - "type": "enum", - "enum": [ - "REPLACE" - ], - "optional": true - } - ], - "group": "generic" - }, - "DBSIZE": { - "summary": "Return the number of keys in the selected database", - "since": "1.0.0", - "group": "server" - }, - "DEBUG OBJECT": { - "summary": "Get debugging information about a key", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "server" - }, - "DEBUG SEGFAULT": { - "summary": "Make the server crash", - "since": "1.0.0", - "group": "server" - }, - "DECR": { - "summary": "Decrement the integer value of a key by one", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "string" - }, - "DECRBY": { - "summary": "Decrement the integer value of a key by the given number", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "decrement", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "string" - }, - "DEL": { - "summary": "Delete a key", - "complexity": "O(N) where N is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is O(M) where M is the number of elements in the list, set, sorted set or hash. 
Removing a single key that holds a string value is O(1).", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "generic" - }, - "DISCARD": { - "summary": "Discard all commands issued after MULTI", - "since": "2.0.0", - "group": "transactions" - }, - "DUMP": { - "summary": "Return a serialized version of the value stored at the specified key.", - "complexity": "O(1) to access the key and additional O(N*M) to serialize it, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1).", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.6.0", - "group": "generic" - }, - "ECHO": { - "summary": "Echo the given string", - "arguments": [ - { - "name": "message", - "type": "string" - } - ], - "since": "1.0.0", - "group": "connection" - }, - "EVAL": { - "summary": "Execute a Lua script server side", - "complexity": "Depends on the script that is executed.", - "arguments": [ - { - "name": "script", - "type": "string" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "optional": true, - "multiple": true - }, - { - "name": "arg", - "type": "string", - "optional": true, - "multiple": true - } - ], - "since": "2.6.0", - "group": "scripting" - }, - "EVAL_RO": { - "summary": "Execute a read-only Lua script server side", - "complexity": "Depends on the script that is executed.", - "arguments": [ - { - "name": "script", - "type": "string" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "arg", - "type": "string", - "multiple": true - } - ], - "since": "7.0.0", - "group": "scripting" - }, - "EVALSHA": { - "summary": "Execute a Lua script server side", - "complexity": "Depends on the script that is executed.", - "arguments": [ - { - "name": "sha1", - 
"type": "string" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "optional": true, - "multiple": true - }, - { - "name": "arg", - "type": "string", - "optional": true, - "multiple": true - } - ], - "since": "2.6.0", - "group": "scripting" - }, - "EVALSHA_RO": { - "summary": "Execute a read-only Lua script server side", - "complexity": "Depends on the script that is executed.", - "arguments": [ - { - "name": "sha1", - "type": "string" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "arg", - "type": "string", - "multiple": true - } - ], - "since": "7.0.0", - "group": "scripting" - }, - "EXEC": { - "summary": "Execute all commands issued after MULTI", - "since": "1.2.0", - "group": "transactions" - }, - "EXISTS": { - "summary": "Determine if a key exists", - "complexity": "O(N) where N is the number of keys to check.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "generic" - }, - "EXPIRE": { - "summary": "Set a key's time to live in seconds", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "seconds", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "EXPIREAT": { - "summary": "Set the expiration for a key as a UNIX timestamp", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "timestamp", - "type": "posix time" - } - ], - "since": "1.2.0", - "group": "generic" - }, - "EXPIRETIME": { - "summary": "Get the expiration Unix timestamp for a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "7.0.0", - "group": "generic" - }, - "FAILOVER": { - "summary": "Start a coordinated failover between this server and one of its replicas.", - "arguments": [ - { - "name": "target", - "type": "block", - "optional": 
true, - "block": [ - { - "command": "TO" - }, - { - "name": "host", - "type": "string" - }, - { - "name": "port", - "type": "integer" - }, - { - "command": "FORCE", - "optional": true - } - ] - }, - { - "command": "ABORT", - "optional": true - }, - { - "command": "TIMEOUT", - "name": "milliseconds", - "type": "integer", - "optional": true - } - ], - "since": "6.2.0", - "group": "server" - }, - "FLUSHALL": { - "summary": "Remove all keys from all databases", - "complexity": "O(N) where N is the total number of keys in all databases", - "arguments": [ - { - "name": "async", - "type": "enum", - "enum": [ - "ASYNC", - "SYNC" - ], - "optional": true - } - ], - "since": "1.0.0", - "group": "server" - }, - "FLUSHDB": { - "summary": "Remove all keys from the current database", - "complexity": "O(N) where N is the number of keys in the selected database", - "arguments": [ - { - "name": "async", - "type": "enum", - "enum": [ - "ASYNC", - "SYNC" - ], - "optional": true - } - ], - "since": "1.0.0", - "group": "server" - }, - "GEOADD": { - "summary": "Add one or more geospatial items in the geospatial index represented using a sorted set", - "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "condition", - "type": "enum", - "enum": [ - "NX", - "XX" - ], - "optional": true - }, - { - "name": "change", - "type": "enum", - "enum": [ - "CH" - ], - "optional": true - }, - { - "name": [ - "longitude", - "latitude", - "member" - ], - "type": [ - "double", - "double", - "string" - ], - "multiple": true - } - ], - "since": "3.2.0", - "group": "geo" - }, - "GEOHASH": { - "summary": "Returns members of a geospatial index as standard geohash strings", - "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - 
"multiple": true - } - ], - "since": "3.2.0", - "group": "geo" - }, - "GEOPOS": { - "summary": "Returns longitude and latitude of members of a geospatial index", - "complexity": "O(N) where N is the number of members requested.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - "multiple": true - } - ], - "since": "3.2.0", - "group": "geo" - }, - "GEODIST": { - "summary": "Returns the distance between two members of a geospatial index", - "complexity": "O(log(N))", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member1", - "type": "string" - }, - { - "name": "member2", - "type": "string" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" - ], - "optional": true - } - ], - "since": "3.2.0", - "group": "geo" - }, - "GEORADIUS": { - "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", - "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "longitude", - "type": "double" - }, - { - "name": "latitude", - "type": "double" - }, - { - "name": "radius", - "type": "double" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" - ] - }, - { - "name": "withcoord", - "type": "enum", - "enum": [ - "WITHCOORD" - ], - "optional": true - }, - { - "name": "withdist", - "type": "enum", - "enum": [ - "WITHDIST" - ], - "optional": true - }, - { - "name": "withhash", - "type": "enum", - "enum": [ - "WITHHASH" - ], - "optional": true - }, - { - "type": "block", - "name": "count", - "block": [ - { - "name": "count", - "command": "COUNT", - "type": "integer" - }, - { - "name": "any", - "type": "enum", - "enum": [ - "ANY" + "ACL": { + "summary": "A container for 
Access List Control commands ", + "since": "6.0.0", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "ACL CAT": { + "summary": "List the ACL categories or the commands inside a category", + "since": "6.0.0", + "group": "server", + "complexity": "O(1) since the categories and commands are a fixed set.", + "acl_categories": [ + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "categoryname", + "type": "string", + "optional": true + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "ACL DELUSER": { + "summary": "Remove the specified ACL users and the associated rules", + "since": "6.0.0", + "group": "server", + "complexity": "O(1) amortized time considering the typical user.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "username", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL DRYRUN": { + "summary": "Returns whether the user can execute the given command without executing the command.", + "since": "7.0.0", + "group": "server", + "complexity": "O(1).", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -4, + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "command", + "type": "string" + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL GENPASS": { + "summary": "Generate a pseudorandom secure password to use for ACL users", + "since": "6.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "bits", + "type": "integer", + "optional": true + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "ACL GETUSER": { + "summary": "Get the 
rules for a specific ACL user", + "since": "6.0.0", + "group": "server", + "complexity": "O(N). Where N is the number of password, command and pattern rules that the user has.", + "history": [ + [ + "6.2.0", + "Added Pub/Sub channel patterns." + ], + [ + "7.0.0", + "Added selectors and changed the format of key and channel patterns from a list to their rule representation." + ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "username", + "type": "string" + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "6.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "ACL LIST": { + "summary": "List the current ACL rules in ACL config file format", + "since": "6.0.0", + "group": "server", + "complexity": "O(N). Where N is the number of configured users.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL LOAD": { + "summary": "Reload the ACLs from the configured ACL file", + "since": "6.0.0", + "group": "server", + "complexity": "O(N). 
Where N is the number of configured users.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL LOG": { + "summary": "List latest events denied because of ACLs in place", + "since": "6.0.0", + "group": "server", + "complexity": "O(N) with N being the number of entries shown.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "arguments": [ + { + "name": "operation", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "reset", + "type": "pure-token", + "token": "RESET" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL SAVE": { + "summary": "Save the current ACL rules in the configured ACL file", + "since": "6.0.0", + "group": "server", + "complexity": "O(N). Where N is the number of configured users.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL SETUSER": { + "summary": "Modify or create the rules for a specific ACL user", + "since": "6.0.0", + "group": "server", + "complexity": "O(N). Where N is the number of rules provided.", + "history": [ + [ + "6.2.0", + "Added Pub/Sub channel patterns." + ], + [ + "7.0.0", + "Added selectors and key based permissions." + ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "rule", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL USERS": { + "summary": "List the username of all the configured ACL rules", + "since": "6.0.0", + "group": "server", + "complexity": "O(N). 
Where N is the number of configured users.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "ACL WHOAMI": { + "summary": "Return the name of the user associated to the current connection", + "since": "6.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "APPEND": { + "summary": "Append a value to a key", + "since": "2.0.0", + "group": "string", + "complexity": "O(1). The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation.", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "ASKING": { + "summary": "Sent by cluster clients after an -ASK redirect", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": 1, + "command_flags": [ + "fast" + ] + }, + "AUTH": { + "summary": "Authenticate to the server", + "since": "1.0.0", + "group": "connection", + "complexity": "O(N) where N is the number of passwords defined for the user", + "history": [ + [ + "6.0.0", + "Added ACL style (username and password)." 
+ ] + ], + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": -2, + "arguments": [ + { + "name": "username", + "type": "string", + "since": "6.0.0", + "optional": true + }, + { + "name": "password", + "type": "string" + } + ], + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "no_auth", + "allow_busy" + ] + }, + "BGREWRITEAOF": { + "summary": "Asynchronously rewrite the append-only file", + "since": "1.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 1, + "command_flags": [ + "admin", + "noscript", + "no_async_loading" + ] + }, + "BGSAVE": { + "summary": "Asynchronously save the dataset to disk", + "since": "1.0.0", + "group": "server", + "complexity": "O(1)", + "history": [ + [ + "3.2.2", + "Added the `SCHEDULE` option." + ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -1, + "arguments": [ + { + "name": "schedule", + "type": "pure-token", + "token": "SCHEDULE", + "since": "3.2.2", + "optional": true + } + ], + "command_flags": [ + "admin", + "noscript", + "no_async_loading" + ] + }, + "BITCOUNT": { + "summary": "Count set bits in a string", + "since": "2.6.0", + "group": "bitmap", + "complexity": "O(N)", + "history": [ + [ + "7.0.0", + "Added the `BYTE|BIT` option." 
+ ] + ], + "acl_categories": [ + "@read", + "@bitmap", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "index", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer" + }, + { + "name": "index_unit", + "type": "oneof", + "since": "7.0.0", + "optional": true, + "arguments": [ + { + "name": "byte", + "type": "pure-token", + "token": "BYTE" + }, + { + "name": "bit", + "type": "pure-token", + "token": "BIT" + } + ] + } + ] + } + ], + "command_flags": [ + "readonly" + ] + }, + "BITFIELD": { + "summary": "Perform arbitrary bitfield integer operations on strings", + "since": "3.2.0", + "group": "bitmap", + "complexity": "O(1) for each subcommand specified", + "acl_categories": [ + "@write", + "@bitmap", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "notes": "This command allows both access and modification of the key", + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true, + "variable_flags": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "operation", + "type": "oneof", + "multiple": true, + "arguments": [ + { + "name": "encoding_offset", + "type": "block", + "token": "GET", + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + } + ] + }, + { + "name": "write", + "type": "block", + "arguments": [ + { + "name": "wrap_sat_fail", + "type": "oneof", + "token": "OVERFLOW", + "optional": 
true, + "arguments": [ + { + "name": "wrap", + "type": "pure-token", + "token": "WRAP" + }, + { + "name": "sat", + "type": "pure-token", + "token": "SAT" + }, + { + "name": "fail", + "type": "pure-token", + "token": "FAIL" + } + ] + }, + { + "name": "write_operation", + "type": "oneof", + "arguments": [ + { + "name": "encoding_offset_value", + "type": "block", + "token": "SET", + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "value", + "type": "integer" + } + ] + }, + { + "name": "encoding_offset_increment", + "type": "block", + "token": "INCRBY", + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "increment", + "type": "integer" + } + ] + } + ] + } + ] + } + ] + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "BITFIELD_RO": { + "summary": "Perform arbitrary bitfield integer operations on strings. Read-only variant of BITFIELD", + "since": "6.0.0", + "group": "bitmap", + "complexity": "O(1) for each subcommand specified", + "acl_categories": [ + "@read", + "@bitmap", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "encoding_offset", + "type": "block", + "token": "GET", + "multiple": true, + "multiple_token": true, + "arguments": [ + { + "name": "encoding", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "BITOP": { + "summary": "Perform bitwise operations between strings", + "since": "2.6.0", + "group": "bitmap", + "complexity": "O(N)", + "acl_categories": [ + "@write", + "@bitmap", + "@slow" + 
], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 3 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "operation", + "type": "string" + }, + { + "name": "destkey", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "BITPOS": { + "summary": "Find first bit set or clear in a string", + "since": "2.8.7", + "group": "bitmap", + "complexity": "O(N)", + "history": [ + [ + "7.0.0", + "Added the `BYTE|BIT` option." + ] + ], + "acl_categories": [ + "@read", + "@bitmap", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "bit", + "type": "integer" + }, + { + "name": "index", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "start", + "type": "integer" + }, + { + "name": "end_index", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "end", + "type": "integer" + }, + { + "name": "index_unit", + "type": "oneof", + "since": "7.0.0", + "optional": true, + "arguments": [ + { + "name": "byte", + "type": "pure-token", + "token": "BYTE" + }, + { + "name": "bit", + "type": "pure-token", + "token": "BIT" + } + ] + } + ] + } + ] + } + ], + "command_flags": [ + "readonly" + ] + }, + "BLMOVE": { + "summary": "Pop an 
element from a list, push it to another list and return it; or block until one is available", + "since": "6.2.0", + "group": "list", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@list", + "@slow", + "@blocking" + ], + "arity": 6, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "wherefrom", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "whereto", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "timeout", + "type": "double" + } + ], + "command_flags": [ + "write", + "denyoom", + "noscript", + "blocking" + ] + }, + "BLMPOP": { + "summary": "Pop elements from a list, or block until one is available", + "since": "7.0.0", + "group": "list", + "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", + "acl_categories": [ + "@write", + "@list", + "@slow", + "@blocking" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "delete": 
true + } + ], + "arguments": [ + { + "name": "timeout", + "type": "double" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "write", + "blocking", + "movablekeys" + ] + }, + "BLPOP": { + "summary": "Remove and get the first element in a list, or block until one is available", + "since": "2.0.0", + "group": "list", + "complexity": "O(N) where N is the number of provided keys.", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." + ] + ], + "acl_categories": [ + "@write", + "@list", + "@slow", + "@blocking" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -2, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ], + "command_flags": [ + "write", + "noscript", + "blocking" + ] + }, + "BRPOP": { + "summary": "Remove and get the last element in a list, or block until one is available", + "since": "2.0.0", + "group": "list", + "complexity": "O(N) where N is the number of provided keys.", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "acl_categories": [ + "@write", + "@list", + "@slow", + "@blocking" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -2, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ], + "command_flags": [ + "write", + "noscript", + "blocking" + ] + }, + "BRPOPLPUSH": { + "summary": "Pop an element from a list, push it to another list and return it; or block until one is available", + "since": "2.2.0", + "group": "list", + "complexity": "O(1)", + "deprecated_since": "6.2.0", + "replaced_by": "`BLMOVE` with the `RIGHT` and `LEFT` arguments", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." + ] + ], + "acl_categories": [ + "@write", + "@list", + "@slow", + "@blocking" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "timeout", + "type": "double" + } + ], + "command_flags": [ + "write", + "denyoom", + "noscript", + "blocking" + ], + "doc_flags": [ + "deprecated" + ] + }, + "BZMPOP": { + "summary": "Remove and return members with scores in a sorted set or block until one is available", + "since": "7.0.0", + 
"group": "sorted-set", + "complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow", + "@blocking" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "timeout", + "type": "double" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "write", + "blocking", + "movablekeys" + ] + }, + "BZPOPMAX": { + "summary": "Remove and return the member with the highest score from one or more sorted sets, or block until one is available", + "since": "5.0.0", + "group": "sorted-set", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." 
+ ] + ], + "acl_categories": [ + "@write", + "@sortedset", + "@fast", + "@blocking" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -2, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ], + "command_flags": [ + "write", + "noscript", + "blocking", + "fast" + ] + }, + "BZPOPMIN": { + "summary": "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available", + "since": "5.0.0", + "group": "sorted-set", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "history": [ + [ + "6.0.0", + "`timeout` is interpreted as a double instead of an integer." + ] + ], + "acl_categories": [ + "@write", + "@sortedset", + "@fast", + "@blocking" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -2, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "timeout", + "type": "double" + } + ], + "command_flags": [ + "write", + "noscript", + "blocking", + "fast" + ] + }, + "CLIENT": { + "summary": "A container for client connection commands", + "since": "2.4.0", + "group": "connection", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "CLIENT CACHING": { + "summary": "Instruct the server about tracking or not keys in the next request", + "since": "6.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + 
"arity": 3, + "arguments": [ + { + "name": "mode", + "type": "oneof", + "arguments": [ + { + "name": "yes", + "type": "pure-token", + "token": "YES" + }, + { + "name": "no", + "type": "pure-token", + "token": "NO" + } + ] + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT GETNAME": { + "summary": "Get the current connection name", + "since": "2.6.9", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT GETREDIR": { + "summary": "Get tracking notifications redirection client ID if any", + "since": "6.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "CLIENT ID": { + "summary": "Returns the client ID for the current connection", + "since": "5.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT INFO": { + "summary": "Returns information about the current client connection.", + "since": "6.2.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "noscript", + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLIENT KILL": { + "summary": "Kill the connection of a client", + "since": "2.4.0", + "group": "connection", + "complexity": "O(N) where N is the number of client connections", + "history": [ + [ + "2.8.12", + "Added new 
filter format." + ], + [ + "2.8.12", + "`ID` option." + ], + [ + "3.2.0", + "Added `master` type in for `TYPE` option." + ], + [ + "5.0.0", + "Replaced `slave` `TYPE` with `replica`. `slave` still supported for backward compatibility." + ], + [ + "6.2.0", + "`LADDR` option." + ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous", + "@connection" + ], + "arity": -3, + "arguments": [ + { + "name": "ip:port", + "type": "string", + "optional": true + }, + { + "name": "client-id", + "type": "integer", + "token": "ID", + "since": "2.8.12", + "optional": true + }, + { + "name": "normal_master_slave_pubsub", + "type": "oneof", + "token": "TYPE", + "since": "2.8.12", + "optional": true, + "arguments": [ + { + "name": "normal", + "type": "pure-token", + "token": "NORMAL" + }, + { + "name": "master", + "type": "pure-token", + "token": "MASTER", + "since": "3.2.0" + }, + { + "name": "slave", + "type": "pure-token", + "token": "SLAVE" + }, + { + "name": "replica", + "type": "pure-token", + "token": "REPLICA", + "since": "5.0.0" + }, + { + "name": "pubsub", + "type": "pure-token", + "token": "PUBSUB" + } + ] + }, + { + "name": "username", + "type": "string", + "token": "USER", + "optional": true + }, + { + "name": "ip:port", + "type": "string", + "token": "ADDR", + "optional": true + }, + { + "name": "ip:port", + "type": "string", + "token": "LADDR", + "since": "6.2.0", + "optional": true + }, + { + "name": "yes/no", + "type": "string", + "token": "SKIPME", + "optional": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CLIENT LIST": { + "summary": "Get the list of client connections", + "since": "2.4.0", + "group": "connection", + "complexity": "O(N) where N is the number of client connections", + "history": [ + [ + "2.8.12", + "Added unique client `id` field." 
], - "optional": true - } - ], - "optional": true - }, - { - "name": "order", - "type": "enum", - "enum": [ - "ASC", - "DESC" - ], - "optional": true - }, - { - "command": "STORE", - "name": "key", - "type": "key", - "optional": true - }, - { - "command": "STOREDIST", - "name": "key", - "type": "key", - "optional": true - } - ], - "since": "3.2.0", - "group": "geo" - }, - "GEORADIUSBYMEMBER": { - "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member", - "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string" - }, - { - "name": "radius", - "type": "double" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" - ] - }, - { - "name": "withcoord", - "type": "enum", - "enum": [ - "WITHCOORD" - ], - "optional": true - }, - { - "name": "withdist", - "type": "enum", - "enum": [ - "WITHDIST" - ], - "optional": true - }, - { - "name": "withhash", - "type": "enum", - "enum": [ - "WITHHASH" - ], - "optional": true - }, - { - "type": "block", - "name": "count", - "block": [ - { - "name": "count", - "command": "COUNT", - "type": "integer" - }, - { - "name": "any", - "type": "enum", - "enum": [ - "ANY" + [ + "5.0.0", + "Added optional `TYPE` filter." 
], - "optional": true - } - ], - "optional": true - }, - { - "name": "order", - "type": "enum", - "enum": [ - "ASC", - "DESC" - ], - "optional": true - }, - { - "command": "STORE", - "name": "key", - "type": "key", - "optional": true - }, - { - "command": "STOREDIST", - "name": "key", - "type": "key", - "optional": true - } - ], - "since": "3.2.0", - "group": "geo" - }, - "GEOSEARCH": { - "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle.", - "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "command": "FROMMEMBER", - "name": "member", - "type": "string", - "optional": true - }, - { - "command": "FROMLONLAT", - "name": [ - "longitude", - "latitude" - ], - "type": [ - "double", - "double" - ], - "optional": true - }, - { - "type": "block", - "name": "circle", - "block": [ - { - "name": "radius", - "command": "BYRADIUS", - "type": "double" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" + [ + "6.2.0", + "Added `laddr` field and the optional `ID` filter." 
+ ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous", + "@connection" + ], + "arity": -2, + "arguments": [ + { + "name": "normal_master_replica_pubsub", + "type": "oneof", + "token": "TYPE", + "since": "5.0.0", + "optional": true, + "arguments": [ + { + "name": "normal", + "type": "pure-token", + "token": "NORMAL" + }, + { + "name": "master", + "type": "pure-token", + "token": "MASTER" + }, + { + "name": "replica", + "type": "pure-token", + "token": "REPLICA" + }, + { + "name": "pubsub", + "type": "pure-token", + "token": "PUBSUB" + } + ] + }, + { + "name": "id", + "type": "block", + "token": "ID", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "client-id", + "type": "integer", + "multiple": true + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLIENT NO-EVICT": { + "summary": "Set client eviction mode for the current connection", + "since": "7.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous", + "@connection" + ], + "arity": 3, + "arguments": [ + { + "name": "enabled", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "token": "OFF" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CLIENT PAUSE": { + "summary": "Stop processing commands from clients for some time", + "since": "2.9.50", + "group": "connection", + "complexity": "O(1)", + "history": [ + [ + "6.2.0", + "`CLIENT PAUSE WRITE` mode added along with the `mode` option." 
+ ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous", + "@connection" + ], + "arity": -3, + "arguments": [ + { + "name": "timeout", + "type": "integer" + }, + { + "name": "mode", + "type": "oneof", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "write", + "type": "pure-token", + "token": "WRITE" + }, + { + "name": "all", + "type": "pure-token", + "token": "ALL" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CLIENT REPLY": { + "summary": "Instruct the server whether to reply to commands", + "since": "3.2.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 3, + "arguments": [ + { + "name": "on_off_skip", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "token": "OFF" + }, + { + "name": "skip", + "type": "pure-token", + "token": "SKIP" + } + ] + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT SETNAME": { + "summary": "Set the current connection name", + "since": "2.6.9", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 3, + "arguments": [ + { + "name": "connection-name", + "type": "string" + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT TRACKING": { + "summary": "Enable or disable server assisted client side caching support", + "since": "6.0.0", + "group": "connection", + "complexity": "O(1). 
Some options may introduce additional complexity.", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -3, + "arguments": [ + { + "name": "status", + "type": "oneof", + "arguments": [ + { + "name": "on", + "type": "pure-token", + "token": "ON" + }, + { + "name": "off", + "type": "pure-token", + "token": "OFF" + } + ] + }, + { + "name": "client-id", + "type": "integer", + "token": "REDIRECT", + "optional": true + }, + { + "name": "prefix", + "type": "string", + "token": "PREFIX", + "optional": true, + "multiple": true, + "multiple_token": true + }, + { + "name": "bcast", + "type": "pure-token", + "token": "BCAST", + "optional": true + }, + { + "name": "optin", + "type": "pure-token", + "token": "OPTIN", + "optional": true + }, + { + "name": "optout", + "type": "pure-token", + "token": "OPTOUT", + "optional": true + }, + { + "name": "noloop", + "type": "pure-token", + "token": "NOLOOP", + "optional": true + } + ], + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT TRACKINGINFO": { + "summary": "Return information about server assisted client side caching for the current connection", + "since": "6.2.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "noscript", + "loading", + "stale" + ] + }, + "CLIENT UNBLOCK": { + "summary": "Unblock a client blocked in a blocking command from a different connection", + "since": "5.0.0", + "group": "connection", + "complexity": "O(log N) where N is the number of client connections", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous", + "@connection" + ], + "arity": -3, + "arguments": [ + { + "name": "client-id", + "type": "integer" + }, + { + "name": "timeout_error", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "timeout", + "type": "pure-token", + "token": "TIMEOUT" + }, + { + "name": "error", + "type": "pure-token", + "token": "ERROR" + } + ] + } + ], + 
"command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CLIENT UNPAUSE": { + "summary": "Resume processing of clients that were paused", + "since": "6.2.0", + "group": "connection", + "complexity": "O(N) Where N is the number of paused clients", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous", + "@connection" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CLUSTER": { + "summary": "A container for cluster commands", + "since": "3.0.0", + "group": "cluster", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "CLUSTER ADDSLOTS": { + "summary": "Assign new hash slots to receiving node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of hash slot arguments", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "slot", + "type": "integer", + "multiple": true + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER ADDSLOTSRANGE": { + "summary": "Assign new hash slots to receiving node", + "since": "7.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of the slots between the start slot and end slot arguments.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -4, + "arguments": [ + { + "name": "start-slot_end-slot", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "start-slot", + "type": "integer" + }, + { + "name": "end-slot", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER BUMPEPOCH": { + "summary": "Advance the cluster config epoch", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], 
+ "arity": 2, + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER COUNT-FAILURE-REPORTS": { + "summary": "Return the number of failure reports active for a given node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the number of failure reports", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "command_flags": [ + "admin", + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER COUNTKEYSINSLOT": { + "summary": "Return the number of local keys in the specified hash slot", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 3, + "arguments": [ + { + "name": "slot", + "type": "integer" + } + ], + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER DELSLOTS": { + "summary": "Set hash slots as unbound in receiving node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of hash slot arguments", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "slot", + "type": "integer", + "multiple": true + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER DELSLOTSRANGE": { + "summary": "Set hash slots as unbound in receiving node", + "since": "7.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of the slots between the start slot and end slot arguments.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -4, + "arguments": [ + { + "name": "start-slot_end-slot", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "start-slot", + "type": "integer" + }, + { + "name": "end-slot", + "type": "integer" + 
} + ] + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER FAILOVER": { + "summary": "Forces a replica to perform a manual failover of its master.", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "arguments": [ + { + "name": "options", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "force", + "type": "pure-token", + "token": "FORCE" + }, + { + "name": "takeover", + "type": "pure-token", + "token": "TAKEOVER" + } + ] + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER FLUSHSLOTS": { + "summary": "Delete a node's own slots information", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER FORGET": { + "summary": "Remove a node from the nodes table", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER GETKEYSINSLOT": { + "summary": "Return local key names in the specified hash slot", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(log(N)) where N is the number of requested keys", + "acl_categories": [ + "@slow" + ], + "arity": 4, + "arguments": [ + { + "name": "slot", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ], + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER HELP": { + 
"summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "CLUSTER INFO": { + "summary": "Provides info about Redis Cluster node state", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER KEYSLOT": { + "summary": "Returns the hash slot of the specified key", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the number of bytes in the key", + "acl_categories": [ + "@slow" + ], + "arity": 3, + "arguments": [ + { + "name": "key", + "type": "string" + } + ], + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER LINKS": { + "summary": "Returns a list of all TCP links to and from peer nodes in cluster", + "since": "7.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of Cluster nodes", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER MEET": { + "summary": "Force a node cluster to handshake with another node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "history": [ + [ + "4.0.0", + "Added the optional `cluster_bus_port` argument." 
+ ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -4, + "arguments": [ + { + "name": "ip", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "cluster_bus_port", + "type": "integer", + "since": "4.0.0", + "optional": true + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER MYID": { + "summary": "Return the node id", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER NODES": { + "summary": "Get Cluster config for the node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of Cluster nodes", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER REPLICAS": { + "summary": "List replica nodes of the specified master node", + "since": "5.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "command_flags": [ + "admin", + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER REPLICATE": { + "summary": "Reconfigure a node as a replica of the specified master node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER RESET": { + "summary": "Reset a Redis Cluster node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the number of known nodes. 
The command may execute a FLUSHALL as a side effect.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "arguments": [ + { + "name": "hard_soft", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "hard", + "type": "pure-token", + "token": "HARD" + }, + { + "name": "soft", + "type": "pure-token", + "token": "SOFT" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER SAVECONFIG": { + "summary": "Forces the node to save cluster state on disk", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER SET-CONFIG-EPOCH": { + "summary": "Set the configuration epoch in a new node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "config-epoch", + "type": "integer" + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER SETSLOT": { + "summary": "Bind a hash slot to a specific node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -4, + "arguments": [ + { + "name": "slot", + "type": "integer" + }, + { + "name": "subcommand", + "type": "oneof", + "arguments": [ + { + "name": "node-id", + "type": "string", + "token": "IMPORTING" + }, + { + "name": "node-id", + "type": "string", + "token": "MIGRATING" + }, + { + "name": "node-id", + "type": "string", + "token": "NODE" + }, + { + "name": "stable", + "type": "pure-token", + "token": "STABLE" + } + ] + } + ], + "command_flags": [ + "admin", + "stale", + "no_async_loading" + ], + 
"hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER SHARDS": { + "summary": "Get array of cluster slots to node mappings", + "since": "7.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of cluster nodes", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "stale" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER SLAVES": { + "summary": "List replica nodes of the specified master node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "deprecated_since": "5.0.0", + "replaced_by": "`CLUSTER REPLICAS`", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "node-id", + "type": "string" + } + ], + "command_flags": [ + "admin", + "stale" + ], + "doc_flags": [ + "deprecated" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "CLUSTER SLOTS": { + "summary": "Get array of Cluster slot to node mappings", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(N) where N is the total number of Cluster nodes", + "deprecated_since": "7.0.0", + "replaced_by": "`CLUSTER SHARDS`", + "history": [ + [ + "4.0.0", + "Added node IDs." + ], + [ + "7.0.0", + "Added additional networking metadata field." 
+ ] + ], + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "stale" + ], + "doc_flags": [ + "deprecated" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "COMMAND": { + "summary": "Get array of Redis command details", + "since": "2.8.13", + "group": "server", + "complexity": "O(N) where N is the total number of Redis commands", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -1, + "command_flags": [ + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "COMMAND COUNT": { + "summary": "Get total number of Redis commands", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "COMMAND DOCS": { + "summary": "Get array of specific Redis command documentation", + "since": "7.0.0", + "group": "server", + "complexity": "O(N) where N is the number of commands to look up", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -2, + "arguments": [ + { + "name": "command-name", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "COMMAND GETKEYS": { + "summary": "Extract keys given a full Redis command", + "since": "2.8.13", + "group": "server", + "complexity": "O(N) where N is the number of arguments to the command", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -4, + "command_flags": [ + "loading", + "stale" + ] + }, + "COMMAND GETKEYSANDFLAGS": { + "summary": "Extract keys and access flags given a full Redis command", + "since": "7.0.0", + "group": "server", + "complexity": "O(N) where N is the number of arguments to the command", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -4, + "command_flags": [ + "loading", + "stale" + ] + }, + "COMMAND HELP": { + "summary": "Show helpful text about the 
different subcommands", + "since": "5.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "COMMAND INFO": { + "summary": "Get array of specific Redis command details, or all when no argument is given.", + "since": "2.8.13", + "group": "server", + "complexity": "O(N) where N is the number of commands to look up", + "history": [ + [ + "7.0.0", + "Allowed to be called with no argument to get info on all commands." + ] + ], + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -2, + "arguments": [ + { + "name": "command-name", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "COMMAND LIST": { + "summary": "Get an array of Redis command names", + "since": "7.0.0", + "group": "server", + "complexity": "O(N) where N is the total number of Redis commands", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": -2, + "arguments": [ + { + "name": "filterby", + "type": "oneof", + "token": "FILTERBY", + "optional": true, + "arguments": [ + { + "name": "module-name", + "type": "string", + "token": "MODULE" + }, + { + "name": "category", + "type": "string", + "token": "ACLCAT" + }, + { + "name": "pattern", + "type": "pattern", + "token": "PATTERN" + } + ] + } + ], + "command_flags": [ + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "CONFIG": { + "summary": "A container for server configuration commands", + "since": "2.0.0", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "CONFIG GET": { + "summary": "Get the values of configuration parameters", + "since": "2.0.0", + "group": "server", + "complexity": "O(N) when N is the number of configuration parameters provided", + "history": [ + [ + "7.0.0", + "Added 
the ability to pass multiple pattern parameters in one call" + ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "parameter", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "parameter", + "type": "string" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CONFIG HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "CONFIG RESETSTAT": { + "summary": "Reset the stats returned by INFO", + "since": "2.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CONFIG REWRITE": { + "summary": "Rewrite the configuration file with the in memory configuration", + "since": "2.8.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "CONFIG SET": { + "summary": "Set configuration parameters to the given values", + "since": "2.0.0", + "group": "server", + "complexity": "O(N) when N is the number of configuration parameters provided", + "history": [ + [ + "7.0.0", + "Added the ability to set multiple parameters in one call." 
+ ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -4, + "arguments": [ + { + "name": "parameter_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "parameter", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" + ] + }, + "COPY": { + "summary": "Copy a key", + "since": "6.2.0", + "group": "generic", + "complexity": "O(N) worst case for collections, where N is the number of nested items. O(1) for string values.", + "acl_categories": [ + "@keyspace", + "@write", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "destination-db", + "type": "integer", + "token": "DB", + "optional": true + }, + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "DBSIZE": { + "summary": "Return the number of keys in the selected database", + "since": "1.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": 1, + "command_flags": [ + "readonly", + "fast" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:agg_sum" + ] + }, + "DEBUG": { + "summary": "A container for 
debugging commands", + "since": "1.0.0", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "doc_flags": [ + "syscmd" + ] + }, + "DECR": { + "summary": "Decrement the integer value of a key by one", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "DECRBY": { + "summary": "Decrement the integer value of a key by the given number", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "decrement", + "type": "integer" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "DEL": { + "summary": "Delete a key", + "since": "1.0.0", + "group": "generic", + "complexity": "O(N) where N is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is O(M) where M is the number of elements in the list, set, sorted set or hash. 
Removing a single key that holds a string value is O(1).", + "acl_categories": [ + "@keyspace", + "@write", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RM": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "write" + ], + "hints": [ + "request_policy:multi_shard", + "response_policy:agg_sum" + ] + }, + "DISCARD": { + "summary": "Discard all commands issued after MULTI", + "since": "2.0.0", + "group": "transactions", + "complexity": "O(N), when N is the number of queued commands", + "acl_categories": [ + "@fast", + "@transaction" + ], + "arity": 1, + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "allow_busy" + ] + }, + "DUMP": { + "summary": "Return a serialized version of the value stored at the specified key.", + "since": "2.6.0", + "group": "generic", + "complexity": "O(1) to access the key and additional O(N*M) to serialize it, where N is the number of Redis objects composing the value and M their average size. 
For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1).", + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "ECHO": { + "summary": "Echo the given string", + "since": "1.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": 2, + "arguments": [ + { + "name": "message", + "type": "string" + } + ], + "command_flags": [ + "loading", + "stale", + "fast" + ] + }, + "EVAL": { + "summary": "Execute a Lua script server side", + "since": "2.6.0", + "group": "scripting", + "complexity": "Depends on the script that is executed.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the worst, RW and UPDATE", + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "script", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "optional": true, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "noscript", + "stale", + "skip_monitor", + "no_mandatory_keys", + "movablekeys" + ] + }, + "EVALSHA": { + "summary": "Execute a Lua script server side", + "since": "2.6.0", + 
"group": "scripting", + "complexity": "Depends on the script that is executed.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "sha1", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "optional": true, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "noscript", + "stale", + "skip_monitor", + "no_mandatory_keys", + "movablekeys" + ] + }, + "EVALSHA_RO": { + "summary": "Execute a read-only Lua script server side", + "since": "7.0.0", + "group": "scripting", + "complexity": "Depends on the script that is executed.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "sha1", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly", + "noscript", + "stale", + "skip_monitor", + "no_mandatory_keys", + "movablekeys" + ] + }, + "EVAL_RO": { + "summary": "Execute a read-only Lua script server side", + "since": "7.0.0", + "group": "scripting", + "complexity": "Depends on the script that is executed.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "key_specs": [ + { + "notes": "We cannot 
tell how the keys will be used so we assume the worst, RO and ACCESS", + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "script", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly", + "noscript", + "stale", + "skip_monitor", + "no_mandatory_keys", + "movablekeys" + ] + }, + "EXEC": { + "summary": "Execute all commands issued after MULTI", + "since": "1.2.0", + "group": "transactions", + "complexity": "Depends on commands in the transaction", + "acl_categories": [ + "@slow", + "@transaction" + ], + "arity": 1, + "command_flags": [ + "noscript", + "loading", + "stale", + "skip_slowlog" + ] + }, + "EXISTS": { + "summary": "Determine if a key exists", + "since": "1.0.0", + "group": "generic", + "complexity": "O(N) where N is the number of keys to check.", + "history": [ + [ + "3.0.3", + "Accepts multiple `key` arguments." 
] - } - ], - "optional": true - }, - { - "type": "block", - "name": "box", - "block": [ - { - "name": "width", - "command": "BYBOX", - "type": "double" - }, - { - "name": "height", - "type": "double" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" + ], + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly", + "fast" + ], + "hints": [ + "request_policy:multi_shard", + "response_policy:agg_sum" + ] + }, + "EXPIRE": { + "summary": "Set a key's time to live in seconds", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "seconds", + "type": "integer" + }, + { + "name": "condition", + "type": "oneof", + "since": "7.0.0", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "EXPIREAT": { + "summary": "Set the expiration for a key as a UNIX timestamp", + "since": "1.2.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." 
] - } - ], - "optional": true - }, - { - "name": "order", - "type": "enum", - "enum": [ - "ASC", - "DESC" - ], - "optional": true - }, - { - "type": "block", - "name": "count", - "block": [ - { - "name": "count", - "command": "COUNT", - "type": "integer" - }, - { - "name": "any", - "type": "enum", - "enum": [ - "ANY" + ], + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "unix-time-seconds", + "type": "unix-time" + }, + { + "name": "condition", + "type": "oneof", + "since": "7.0.0", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "EXPIRETIME": { + "summary": "Get the expiration Unix timestamp for a key", + "since": "7.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "FAILOVER": { + "summary": "Start a coordinated failover between this server and one of its replicas.", + "since": "6.2.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + 
"@admin", + "@slow", + "@dangerous" + ], + "arity": -1, + "arguments": [ + { + "name": "target", + "type": "block", + "token": "TO", + "optional": true, + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "force", + "type": "pure-token", + "token": "FORCE", + "optional": true + } + ] + }, + { + "name": "abort", + "type": "pure-token", + "token": "ABORT", + "optional": true + }, + { + "name": "milliseconds", + "type": "integer", + "token": "TIMEOUT", + "optional": true + } + ], + "command_flags": [ + "admin", + "noscript", + "stale" + ] + }, + "FCALL": { + "summary": "Invoke a function", + "since": "7.0.0", + "group": "scripting", + "complexity": "Depends on the function that is executed.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the worst, RW and UPDATE", + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "function", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "noscript", + "stale", + "skip_monitor", + "no_mandatory_keys", + "movablekeys" + ] + }, + "FCALL_RO": { + "summary": "Invoke a read-only function", + "since": "7.0.0", + "group": "scripting", + "complexity": "Depends on the function that is executed.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "key_specs": [ + { + "notes": "We cannot tell how the keys will be used so we assume the worst, RO and ACCESS", + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + 
"find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "function", + "type": "string" + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "arg", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly", + "noscript", + "stale", + "skip_monitor", + "no_mandatory_keys", + "movablekeys" + ] + }, + "FLUSHALL": { + "summary": "Remove all keys from all databases", + "since": "1.0.0", + "group": "server", + "complexity": "O(N) where N is the total number of keys in all databases", + "history": [ + [ + "4.0.0", + "Added the `ASYNC` flushing mode modifier." + ], + [ + "6.2.0", + "Added the `SYNC` flushing mode modifier." + ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@slow", + "@dangerous" + ], + "arity": -1, + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC", + "since": "4.0.0" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC", + "since": "6.2.0" + } + ] + } + ], + "command_flags": [ + "write" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "FLUSHDB": { + "summary": "Remove all keys from the current database", + "since": "1.0.0", + "group": "server", + "complexity": "O(N) where N is the number of keys in the selected database", + "history": [ + [ + "4.0.0", + "Added the `ASYNC` flushing mode modifier." 
], - "optional": true - } - ], - "optional": true - }, - { - "name": "withcoord", - "type": "enum", - "enum": [ - "WITHCOORD" - ], - "optional": true - }, - { - "name": "withdist", - "type": "enum", - "enum": [ - "WITHDIST" - ], - "optional": true - }, - { - "name": "withhash", - "type": "enum", - "enum": [ - "WITHHASH" - ], - "optional": true - } - ], - "since": "6.2", - "group": "geo" - }, - "GEOSEARCHSTORE": { - "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle, and store the result in another key.", - "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "source", - "type": "key" - }, - { - "command": "FROMMEMBER", - "name": "member", - "type": "string", - "optional": true - }, - { - "command": "FROMLONLAT", - "name": [ - "longitude", - "latitude" - ], - "type": [ - "double", - "double" - ], - "optional": true - }, - { - "type": "block", - "name": "circle", - "block": [ - { - "name": "radius", - "command": "BYRADIUS", - "type": "double" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" + [ + "6.2.0", + "Added the `SYNC` flushing mode modifier." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@slow", + "@dangerous" + ], + "arity": -1, + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC", + "since": "4.0.0" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC", + "since": "6.2.0" + } + ] + } + ], + "command_flags": [ + "write" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "FUNCTION": { + "summary": "A container for function commands", + "since": "7.0.0", + "group": "scripting", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "FUNCTION DELETE": { + "summary": "Delete a function by name", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@slow", + "@scripting" + ], + "arity": 3, + "arguments": [ + { + "name": "library-name", + "type": "string" + } + ], + "command_flags": [ + "write", + "noscript" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "FUNCTION DUMP": { + "summary": "Dump all functions into a serialized binary payload", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(N) where N is the number of functions", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 2, + "command_flags": [ + "noscript" + ] + }, + "FUNCTION FLUSH": { + "summary": "Deleting all functions", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(N) where N is the number of functions deleted", + "acl_categories": [ + "@write", + "@slow", + "@scripting" + ], + "arity": -2, + "arguments": [ + { + "name": "async", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC" + } + ] + } + ], + "command_flags": [ + "write", + "noscript" + ], 
+ "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "FUNCTION HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "FUNCTION KILL": { + "summary": "Kill the function currently in execution.", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 2, + "command_flags": [ + "noscript", + "allow_busy" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:one_succeeded" + ] + }, + "FUNCTION LIST": { + "summary": "List information about all the functions", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(N) where N is the number of functions", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -2, + "arguments": [ + { + "name": "library-name-pattern", + "type": "string", + "token": "LIBRARYNAME", + "optional": true + }, + { + "name": "withcode", + "type": "pure-token", + "token": "WITHCODE", + "optional": true + } + ], + "command_flags": [ + "noscript" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "FUNCTION LOAD": { + "summary": "Create a function with the given arguments (name, code, description)", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(1) (considering compilation time is redundant)", + "acl_categories": [ + "@write", + "@slow", + "@scripting" + ], + "arity": -3, + "arguments": [ + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE", + "optional": true + }, + { + "name": "function-code", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom", + "noscript" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "FUNCTION RESTORE": { + "summary": "Restore all the functions on the given payload", + 
"since": "7.0.0", + "group": "scripting", + "complexity": "O(N) where N is the number of functions on the payload", + "acl_categories": [ + "@write", + "@slow", + "@scripting" + ], + "arity": -3, + "arguments": [ + { + "name": "serialized-value", + "type": "string" + }, + { + "name": "policy", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "flush", + "type": "pure-token", + "token": "FLUSH" + }, + { + "name": "append", + "type": "pure-token", + "token": "APPEND" + }, + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "noscript" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "FUNCTION STATS": { + "summary": "Return information about the function currently running (name, description, duration)", + "since": "7.0.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 2, + "command_flags": [ + "noscript", + "allow_busy" + ], + "hints": [ + "nondeterministic_output", + "request_policy:all_shards", + "response_policy:special" + ] + }, + "GEOADD": { + "summary": "Add one or more geospatial items in the geospatial index represented using a sorted set", + "since": "3.2.0", + "group": "geo", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "history": [ + [ + "6.2.0", + "Added the `CH`, `NX` and `XX` options." 
] - } - ], - "optional": true - }, - { - "type": "block", - "name": "box", - "block": [ - { - "name": "width", - "command": "BYBOX", - "type": "double" - }, - { - "name": "height", - "type": "double" - }, - { - "name": "unit", - "type": "enum", - "enum": [ - "m", - "km", - "ft", - "mi" + ], + "acl_categories": [ + "@write", + "@geo", + "@slow" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "condition", + "type": "oneof", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + } + ] + }, + { + "name": "change", + "type": "pure-token", + "token": "CH", + "since": "6.2.0", + "optional": true + }, + { + "name": "longitude_latitude_member", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "member", + "type": "string" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "GEODIST": { + "summary": "Returns the distance between two members of a geospatial index", + "since": "3.2.0", + "group": "geo", + "complexity": "O(log(N))", + "acl_categories": [ + "@read", + "@geo", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member1", + "type": "string" + }, + { + "name": "member2", + "type": "string" + }, + 
{ + "name": "unit", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + } + ], + "command_flags": [ + "readonly" + ] + }, + "GEOHASH": { + "summary": "Returns members of a geospatial index as standard geohash strings", + "since": "3.2.0", + "group": "geo", + "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.", + "acl_categories": [ + "@read", + "@geo", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "GEOPOS": { + "summary": "Returns longitude and latitude of members of a geospatial index", + "since": "3.2.0", + "group": "geo", + "complexity": "O(N) where N is the number of members requested.", + "acl_categories": [ + "@read", + "@geo", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "GEORADIUS": { + "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a point", + 
"since": "3.2.0", + "group": "geo", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` argument", + "history": [ + [ + "6.2.0", + "Added the `ANY` option for `COUNT`." + ] + ], + "acl_categories": [ + "@write", + "@geo", + "@slow" + ], + "arity": -6, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + }, + { + "begin_search": { + "type": "keyword", + "spec": { + "keyword": "STORE", + "startfrom": 6 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "keyword", + "spec": { + "keyword": "STOREDIST", + "startfrom": 6 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + }, + { + "name": "withcoord", + "type": "pure-token", + "token": "WITHCOORD", + "optional": true + }, + { + "name": "withdist", + "type": "pure-token", + "token": "WITHDIST", + "optional": true + }, + { + "name": "withhash", + 
"type": "pure-token", + "token": "WITHHASH", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT" + }, + { + "name": "any", + "type": "pure-token", + "token": "ANY", + "since": "6.2.0", + "optional": true + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "token": "STORE", + "optional": true + }, + { + "name": "key", + "type": "key", + "key_spec_index": 2, + "token": "STOREDIST", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom", + "movablekeys" + ], + "doc_flags": [ + "deprecated" + ] + }, + "GEORADIUSBYMEMBER": { + "summary": "Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member", + "since": "3.2.0", + "group": "geo", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments", + "acl_categories": [ + "@write", + "@geo", + "@slow" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + }, + { + "begin_search": { + "type": "keyword", + "spec": { + "keyword": "STORE", + "startfrom": 5 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "keyword", + "spec": { 
+ "keyword": "STOREDIST", + "startfrom": 5 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + }, + { + "name": "withcoord", + "type": "pure-token", + "token": "WITHCOORD", + "optional": true + }, + { + "name": "withdist", + "type": "pure-token", + "token": "WITHDIST", + "optional": true + }, + { + "name": "withhash", + "type": "pure-token", + "token": "WITHHASH", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT" + }, + { + "name": "any", + "type": "pure-token", + "token": "ANY", + "optional": true + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "token": "STORE", + "optional": true + }, + { + "name": "key", + "type": "key", + "key_spec_index": 2, + "token": "STOREDIST", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom", + "movablekeys" + ], + "doc_flags": [ + "deprecated" + ] + }, + "GEORADIUSBYMEMBER_RO": { + "summary": "A read-only variant for GEORADIUSBYMEMBER", + "since": "3.2.10", + "group": "geo", + "complexity": "O(N+log(M)) where N is the number of elements inside the bounding box of the 
circular area delimited by center and radius and M is the number of items inside the index.", + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` with the `BYRADIUS` and `FROMMEMBER` arguments", + "acl_categories": [ + "@read", + "@geo", + "@slow" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + }, + { + "name": "withcoord", + "type": "pure-token", + "token": "WITHCOORD", + "optional": true + }, + { + "name": "withdist", + "type": "pure-token", + "token": "WITHDIST", + "optional": true + }, + { + "name": "withhash", + "type": "pure-token", + "token": "WITHHASH", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT" + }, + { + "name": "any", + "type": "pure-token", + "token": "ANY", + "optional": true + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "GEORADIUS_RO": { + "summary": "A read-only variant for GEORADIUS", + "since": "3.2.10", + "group": "geo", + "complexity": "O(N+log(M)) 
where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index.", + "deprecated_since": "6.2.0", + "replaced_by": "`GEOSEARCH` with the `BYRADIUS` argument", + "history": [ + [ + "6.2.0", + "Added the `ANY` option for `COUNT`." ] - } - ], - "optional": true - }, - { - "name": "order", - "type": "enum", - "enum": [ - "ASC", - "DESC" - ], - "optional": true - }, - { - "type": "block", - "name": "count", - "block": [ - { - "name": "count", - "command": "COUNT", - "type": "integer" - }, - { - "name": "any", - "type": "enum", - "enum": [ - "ANY" + ], + "acl_categories": [ + "@read", + "@geo", + "@slow" + ], + "arity": -6, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + }, + { + "name": "radius", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + }, + { + "name": "withcoord", + "type": "pure-token", + "token": "WITHCOORD", + "optional": true + }, + { + "name": "withdist", + "type": "pure-token", + "token": "WITHDIST", + "optional": true + }, + { + "name": "withhash", + "type": "pure-token", + "token": "WITHHASH", + "optional": true + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT" + }, + { + "name": "any", + "type": "pure-token", + 
"token": "ANY", + "since": "6.2.0", + "optional": true + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "GEOSEARCH": { + "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle.", + "since": "6.2.0", + "group": "geo", + "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "acl_categories": [ + "@read", + "@geo", + "@slow" + ], + "arity": -7, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "from", + "type": "oneof", + "arguments": [ + { + "name": "member", + "type": "string", + "token": "FROMMEMBER" + }, + { + "name": "longitude_latitude", + "type": "block", + "token": "FROMLONLAT", + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + } + ] + } + ] + }, + { + "name": "by", + "type": "oneof", + "arguments": [ + { + "name": "circle", + "type": "block", + "arguments": [ + { + "name": "radius", + "type": "double", + "token": "BYRADIUS" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + } + ] + }, + { 
+ "name": "box", + "type": "block", + "arguments": [ + { + "name": "width", + "type": "double", + "token": "BYBOX" + }, + { + "name": "height", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + } + ] + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT" + }, + { + "name": "any", + "type": "pure-token", + "token": "ANY", + "optional": true + } + ] + }, + { + "name": "withcoord", + "type": "pure-token", + "token": "WITHCOORD", + "optional": true + }, + { + "name": "withdist", + "type": "pure-token", + "token": "WITHDIST", + "optional": true + }, + { + "name": "withhash", + "type": "pure-token", + "token": "WITHHASH", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "GEOSEARCHSTORE": { + "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle, and store the result in another key.", + "since": "6.2.0", + "group": "geo", + "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape", + "acl_categories": [ + "@write", + "@geo", + "@slow" + ], + "arity": -8, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": 
true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "source", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "from", + "type": "oneof", + "arguments": [ + { + "name": "member", + "type": "string", + "token": "FROMMEMBER" + }, + { + "name": "longitude_latitude", + "type": "block", + "token": "FROMLONLAT", + "arguments": [ + { + "name": "longitude", + "type": "double" + }, + { + "name": "latitude", + "type": "double" + } + ] + } + ] + }, + { + "name": "by", + "type": "oneof", + "arguments": [ + { + "name": "circle", + "type": "block", + "arguments": [ + { + "name": "radius", + "type": "double", + "token": "BYRADIUS" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + } + ] + }, + { + "name": "box", + "type": "block", + "arguments": [ + { + "name": "width", + "type": "double", + "token": "BYBOX" + }, + { + "name": "height", + "type": "double" + }, + { + "name": "unit", + "type": "oneof", + "arguments": [ + { + "name": "m", + "type": "pure-token", + "token": "M" + }, + { + "name": "km", + "type": "pure-token", + "token": "KM" + }, + { + "name": "ft", + "type": "pure-token", + "token": "FT" + }, + { + "name": "mi", + "type": "pure-token", + "token": "MI" + } + ] + } + ] + } + ] + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] 
+ }, + { + "name": "count", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT" + }, + { + "name": "any", + "type": "pure-token", + "token": "ANY", + "optional": true + } + ] + }, + { + "name": "storedist", + "type": "pure-token", + "token": "STOREDIST", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "GET": { + "summary": "Get the value of a key", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@string", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "GETBIT": { + "summary": "Returns the bit value at offset in the string value stored at key", + "since": "2.2.0", + "group": "bitmap", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@bitmap", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "offset", + "type": "integer" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "GETDEL": { + "summary": "Get the value of a key and delete the key", + "since": "6.2.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, 
+ "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "GETEX": { + "summary": "Get the value of a key and optionally set its expiration", + "since": "6.2.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "notes": "RW and UPDATE because it changes the TTL", + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "expiration", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "seconds", + "type": "integer", + "token": "EX" + }, + { + "name": "milliseconds", + "type": "integer", + "token": "PX" + }, + { + "name": "unix-time-seconds", + "type": "unix-time", + "token": "EXAT" + }, + { + "name": "unix-time-milliseconds", + "type": "unix-time", + "token": "PXAT" + }, + { + "name": "persist", + "type": "pure-token", + "token": "PERSIST" + } + ] + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "GETRANGE": { + "summary": "Get a substring of the string stored at a key", + "since": "2.4.0", + "group": "string", + "complexity": "O(N) where N is the length of the returned string. 
The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", + "acl_categories": [ + "@read", + "@string", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer" + } + ], + "command_flags": [ + "readonly" + ] + }, + "GETSET": { + "summary": "Set the string value of a key and return its old value", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "deprecated_since": "6.2.0", + "replaced_by": "`SET` with the `!GET` argument", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ], + "doc_flags": [ + "deprecated" + ] + }, + "HDEL": { + "summary": "Delete one or more hash fields", + "since": "2.0.0", + "group": "hash", + "complexity": "O(N) where N is the number of fields to be removed.", + "history": [ + [ + "2.4.0", + "Accepts multiple `field` arguments." 
+ ] + ], + "acl_categories": [ + "@write", + "@hash", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "HELLO": { + "summary": "Handshake with Redis", + "since": "6.0.0", + "group": "connection", + "complexity": "O(1)", + "history": [ + [ + "6.2.0", + "`protover` made optional; when called without arguments the command reports the current connection's context." + ] + ], + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": -1, + "arguments": [ + { + "name": "arguments", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "protover", + "type": "integer" + }, + { + "name": "username_password", + "type": "block", + "token": "AUTH", + "optional": true, + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "password", + "type": "string" + } + ] + }, + { + "name": "clientname", + "type": "string", + "token": "SETNAME", + "optional": true + } + ] + } + ], + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "no_auth", + "allow_busy" + ] + }, + "HEXISTS": { + "summary": "Determine if a hash field exists", + "since": "2.0.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@hash", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + } + ], + 
"command_flags": [ + "readonly", + "fast" + ] + }, + "HGET": { + "summary": "Get the value of a hash field", + "since": "2.0.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@hash", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "HGETALL": { + "summary": "Get all the fields and values in a hash", + "since": "2.0.0", + "group": "hash", + "complexity": "O(N) where N is the size of the hash.", + "acl_categories": [ + "@read", + "@hash", + "@slow" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "HINCRBY": { + "summary": "Increment the integer value of a hash field by the given number", + "since": "2.0.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@hash", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + }, + { + "name": "increment", + "type": "integer" + } + ], + 
"command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "HINCRBYFLOAT": { + "summary": "Increment the float value of a hash field by the given amount", + "since": "2.6.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@hash", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + }, + { + "name": "increment", + "type": "double" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "HKEYS": { + "summary": "Get all the fields in a hash", + "since": "2.0.0", + "group": "hash", + "complexity": "O(N) where N is the size of the hash.", + "acl_categories": [ + "@read", + "@hash", + "@slow" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "HLEN": { + "summary": "Get the number of fields in a hash", + "since": "2.0.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@hash", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + 
"HMGET": { + "summary": "Get the values of all the given hash fields", + "since": "2.0.0", + "group": "hash", + "complexity": "O(N) where N is the number of fields being requested.", + "acl_categories": [ + "@read", + "@hash", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "HMSET": { + "summary": "Set multiple hash fields to multiple values", + "since": "2.0.0", + "group": "hash", + "complexity": "O(N) where N is the number of fields being set.", + "deprecated_since": "4.0.0", + "replaced_by": "`HSET` with multiple field-value pairs", + "acl_categories": [ + "@write", + "@hash", + "@fast" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ], + "doc_flags": [ + "deprecated" + ] + }, + "HRANDFIELD": { + "summary": "Get one or multiple random fields from a hash", + "since": "6.2.0", + "group": "hash", + "complexity": "O(N) where N is the number of fields returned", + "acl_categories": [ + "@read", + "@hash", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } 
+ }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "options", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "withvalues", + "type": "pure-token", + "token": "WITHVALUES", + "optional": true + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "HSCAN": { + "summary": "Incrementally iterate hash fields and associated values", + "since": "2.8.0", + "group": "hash", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "acl_categories": [ + "@read", + "@hash", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "cursor", + "type": "integer" + }, + { + "name": "pattern", + "type": "pattern", + "token": "MATCH", + "optional": true + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "HSET": { + "summary": "Set the string value of a hash field", + "since": "2.0.0", + "group": "hash", + "complexity": "O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs.", + "history": [ + [ + "4.0.0", + "Accepts multiple `field` and `value` arguments." 
+ ] + ], + "acl_categories": [ + "@write", + "@hash", + "@fast" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "HSETNX": { + "summary": "Set the value of a hash field, only if the field does not exist", + "since": "2.0.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@hash", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "HSTRLEN": { + "summary": "Get the length of the value of a hash field", + "since": "3.2.0", + "group": "hash", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@hash", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "field", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "HVALS": { + 
"summary": "Get all the values in a hash", + "since": "2.0.0", + "group": "hash", + "complexity": "O(N) where N is the size of the hash.", + "acl_categories": [ + "@read", + "@hash", + "@slow" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "INCR": { + "summary": "Increment the integer value of a key by one", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "INCRBY": { + "summary": "Increment the integer value of a key by the given amount", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "increment", + "type": "integer" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "INCRBYFLOAT": { + "summary": "Increment the float value of a key by 
the given amount", + "since": "2.6.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "increment", + "type": "double" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "INFO": { + "summary": "Get information and statistics about the server", + "since": "1.0.0", + "group": "server", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added support for taking multiple section arguments." + ] + ], + "acl_categories": [ + "@slow", + "@dangerous" + ], + "arity": -1, + "arguments": [ + { + "name": "section", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output", + "request_policy:all_shards", + "response_policy:special" + ] + }, + "KEYS": { + "summary": "Find all keys matching the given pattern", + "since": "1.0.0", + "group": "generic", + "complexity": "O(N) with N being the number of keys in the database, under the assumption that the key names in the database and the given pattern have limited length.", + "acl_categories": [ + "@keyspace", + "@read", + "@slow", + "@dangerous" + ], + "arity": 2, + "arguments": [ + { + "name": "pattern", + "type": "pattern" + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "request_policy:all_shards", + "nondeterministic_output_order" + ] + }, + "LASTSAVE": { + "summary": "Get the UNIX time stamp of the last successful save to disk", + "since": "1.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@fast", + "@dangerous" + ], + "arity": 1, + 
"command_flags": [ + "loading", + "stale", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "LATENCY": { + "summary": "A container for latency diagnostics commands", + "since": "2.8.13", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "LATENCY DOCTOR": { + "summary": "Return a human readable latency analysis report.", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output", + "request_policy:all_nodes", + "response_policy:special" + ] + }, + "LATENCY GRAPH": { + "summary": "Return a latency graph for the event.", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "event", + "type": "string" + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output", + "request_policy:all_nodes", + "response_policy:special" + ] + }, + "LATENCY HELP": { + "summary": "Show helpful text about the different subcommands.", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "LATENCY HISTOGRAM": { + "summary": "Return the cumulative distribution of latencies of a subset of commands or all.", + "since": "7.0.0", + "group": "server", + "complexity": "O(N) where N is the number of commands with latency information being retrieved.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "arguments": [ + { + "name": "command", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + 
"hints": [ + "nondeterministic_output", + "request_policy:all_nodes", + "response_policy:special" + ] + }, + "LATENCY HISTORY": { + "summary": "Return timestamp-latency samples for the event.", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "event", + "type": "string" + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output", + "request_policy:all_nodes", + "response_policy:special" + ] + }, + "LATENCY LATEST": { + "summary": "Return the latest latency samples for all events.", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "nondeterministic_output", + "request_policy:all_nodes", + "response_policy:special" + ] + }, + "LATENCY RESET": { + "summary": "Reset latency data for one or more events.", + "since": "2.8.13", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "arguments": [ + { + "name": "event", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" + ] + }, + "LCS": { + "summary": "Find longest common substring", + "since": "7.0.0", + "group": "string", + "complexity": "O(N*M) where N and M are the lengths of s1 and s2, respectively", + "acl_categories": [ + "@read", + "@string", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + 
"arguments": [ + { + "name": "key1", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key2", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "len", + "type": "pure-token", + "token": "LEN", + "optional": true + }, + { + "name": "idx", + "type": "pure-token", + "token": "IDX", + "optional": true + }, + { + "name": "len", + "type": "integer", + "token": "MINMATCHLEN", + "optional": true + }, + { + "name": "withmatchlen", + "type": "pure-token", + "token": "WITHMATCHLEN", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "LINDEX": { + "summary": "Get an element from a list by its index", + "since": "1.0.0", + "group": "list", + "complexity": "O(N) where N is the number of elements to traverse to get to the element at index. This makes asking for the first or the last element of the list O(1).", + "acl_categories": [ + "@read", + "@list", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "index", + "type": "integer" + } + ], + "command_flags": [ + "readonly" + ] + }, + "LINSERT": { + "summary": "Insert an element before or after another element in a list", + "since": "2.2.0", + "group": "list", + "complexity": "O(N) where N is the number of elements to traverse before seeing the value pivot. 
This means that inserting somewhere on the left end on the list (head) can be considered O(1) and inserting somewhere on the right end (tail) is O(N).", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": 5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "before", + "type": "pure-token", + "token": "BEFORE" + }, + { + "name": "after", + "type": "pure-token", + "token": "AFTER" + } + ] + }, + { + "name": "pivot", + "type": "string" + }, + { + "name": "element", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "LLEN": { + "summary": "Get the length of a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@list", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "LMOVE": { + "summary": "Pop an element from a list, push it to another list and return it", + "since": "6.2.0", + "group": "list", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": 5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + 
"index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "wherefrom", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "whereto", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "LMPOP": { + "summary": "Pop elements from a list", + "since": "7.0.0", + "group": "list", + "complexity": "O(N+M) where N is the number of provided keys and M is the number of elements returned.", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "left", + "type": "pure-token", + "token": "LEFT" + }, + { + "name": "right", + "type": "pure-token", + "token": "RIGHT" + } + ] + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "write", + "movablekeys" + ] + }, + "LOLWUT": { + "summary": "Display some computer art and the Redis version", + "since": "5.0.0", + "group": "server", + "acl_categories": [ + "@read", + 
"@fast" + ], + "arity": -1, + "arguments": [ + { + "name": "version", + "type": "integer", + "token": "VERSION", + "optional": true + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "LPOP": { + "summary": "Remove and get the first elements in a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(N) where N is the number of elements returned", + "history": [ + [ + "6.2.0", + "Added the `count` argument." + ] + ], + "acl_categories": [ + "@write", + "@list", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "since": "6.2.0", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "LPOS": { + "summary": "Return the index of matching elements on a list", + "since": "6.0.6", + "group": "list", + "complexity": "O(N) where N is the number of elements in the list, for the average case. 
When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time.", + "acl_categories": [ + "@read", + "@list", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string" + }, + { + "name": "rank", + "type": "integer", + "token": "RANK", + "optional": true + }, + { + "name": "num-matches", + "type": "integer", + "token": "COUNT", + "optional": true + }, + { + "name": "len", + "type": "integer", + "token": "MAXLEN", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "LPUSH": { + "summary": "Prepend one or multiple elements to a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "history": [ + [ + "2.4.0", + "Accepts multiple `element` arguments." 
+ ] + ], + "acl_categories": [ + "@write", + "@list", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "LPUSHX": { + "summary": "Prepend an element to a list, only if the list exists", + "since": "2.2.0", + "group": "list", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "history": [ + [ + "4.0.0", + "Accepts multiple `element` arguments." + ] + ], + "acl_categories": [ + "@write", + "@list", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "LRANGE": { + "summary": "Get a range of elements from a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(S+N) where S is the distance of start offset from HEAD for small lists, from nearest end (HEAD or TAIL) for large lists; and N is the number of elements in the specified range.", + "acl_categories": [ + "@read", + "@list", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true 
+ } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + } + ], + "command_flags": [ + "readonly" + ] + }, + "LREM": { + "summary": "Remove elements from a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(N+M) where N is the length of the list and M is the number of elements removed.", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer" + }, + { + "name": "element", + "type": "string" + } + ], + "command_flags": [ + "write" + ] + }, + "LSET": { + "summary": "Set the value of an element in a list by its index", + "since": "1.0.0", + "group": "list", + "complexity": "O(N) where N is the length of the list. 
Setting either the first or the last element of the list is O(1).", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "index", + "type": "integer" + }, + { + "name": "element", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "LTRIM": { + "summary": "Trim a list to the specified range", + "since": "1.0.0", + "group": "list", + "complexity": "O(N) where N is the number of elements to be removed by the operation.", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + } + ], + "command_flags": [ + "write" + ] + }, + "MEMORY": { + "summary": "A container for memory diagnostics commands", + "since": "4.0.0", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "MEMORY DOCTOR": { + "summary": "Outputs memory problems report", + "since": "4.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "hints": [ + "nondeterministic_output", + "request_policy:all_shards", + "response_policy:special" + ] + }, + "MEMORY HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "4.0.0", + "group": "server", + "complexity": "O(1)", + 
"acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "MEMORY MALLOC-STATS": { + "summary": "Show allocator internal stats", + "since": "4.0.0", + "group": "server", + "complexity": "Depends on how much memory is allocated, could be slow", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "hints": [ + "nondeterministic_output", + "request_policy:all_shards", + "response_policy:special" + ] + }, + "MEMORY PURGE": { + "summary": "Ask the allocator to release memory", + "since": "4.0.0", + "group": "server", + "complexity": "Depends on how much memory is allocated, could be slow", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "MEMORY STATS": { + "summary": "Show memory usage details", + "since": "4.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "hints": [ + "nondeterministic_output", + "request_policy:all_shards", + "response_policy:special" + ] + }, + "MEMORY USAGE": { + "summary": "Estimate the memory usage of a key", + "since": "4.0.0", + "group": "server", + "complexity": "O(N) where N is the number of samples.", + "acl_categories": [ + "@read", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "token": "SAMPLES", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "MGET": { + "summary": "Get the values of all the given keys", + "since": "1.0.0", + "group": "string", + "complexity": "O(N) where N is the number of keys to retrieve.", + "acl_categories": [ + "@read", + "@string", + "@fast" + ], + "arity": -2, + "key_specs": [ 
+ { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly", + "fast" + ], + "hints": [ + "request_policy:multi_shard" + ] + }, + "MIGRATE": { + "summary": "Atomically transfer a key from a Redis instance to another one.", + "since": "2.6.0", + "group": "generic", + "complexity": "This command actually executes a DUMP+DEL in the source instance, and a RESTORE in the target instance. See the pages of these commands for time complexity. Also an O(N) data transfer between the two instances is performed.", + "history": [ + [ + "3.0.0", + "Added the `COPY` and `REPLACE` options." ], - "optional": true - } - ], - "optional": true - }, - { - "name": "storedist", - "type": "enum", - "enum": [ - "STOREDIST" - ], - "optional": true - } - ], - "since": "6.2", - "group": "geo" - }, - "GET": { - "summary": "Get the value of a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "string" - }, - "GETBIT": { - "summary": "Returns the bit value at offset in the string value stored at key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "offset", - "type": "integer" - } - ], - "since": "2.2.0", - "group": "bitmap" - }, - "GETDEL": { - "summary":"Get the value of a key and delete the key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "6.2.0", - "group": "string" - }, - "GETEX": { - "summary": "Get the value of a key and optionally set its expiration", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "expiration", - "type": "enum", - "enum": [ - "EX seconds", - "PX 
milliseconds", - "EXAT timestamp", - "PXAT milliseconds-timestamp", - "PERSIST" - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "string" - }, - "GETRANGE": { - "summary": "Get a substring of the string stored at a key", - "complexity": "O(N) where N is the length of the returned string. The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "start", - "type": "integer" - }, - { - "name": "end", - "type": "integer" - } - ], - "since": "2.4.0", - "group": "string" - }, - "GETSET": { - "summary": "Set the string value of a key and return its old value", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "1.0.0", - "group": "string" - }, - "HDEL": { - "summary": "Delete one or more hash fields", - "complexity": "O(N) where N is the number of fields to be removed.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string", - "multiple": true - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HELLO": { - "summary": "Handshake with Redis", - "complexity": "O(1)", - "arguments": [ - { - "name": "arguments", - "type": "block", - "block": [ - { - "name": "protover", - "type": "integer" - }, - { - "command": "AUTH", - "name": [ - "username", - "password" + [ + "3.0.6", + "Added the `KEYS` option." ], - "type": [ - "string", - "string" + [ + "4.0.7", + "Added the `AUTH` option." 
], - "optional": true - }, - { - "command": "SETNAME", - "name": "clientname", - "type": "string", - "optional": true - } - ], - "optional": true - } - ], - "since": "6.0.0", - "group": "connection" - }, - "HEXISTS": { - "summary": "Determine if a hash field exists", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HGET": { - "summary": "Get the value of a hash field", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HGETALL": { - "summary": "Get all the fields and values in a hash", - "complexity": "O(N) where N is the size of the hash.", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HINCRBY": { - "summary": "Increment the integer value of a hash field by the given number", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string" - }, - { - "name": "increment", - "type": "integer" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HINCRBYFLOAT": { - "summary": "Increment the float value of a hash field by the given amount", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string" - }, - { - "name": "increment", - "type": "double" - } - ], - "since": "2.6.0", - "group": "hash" - }, - "HKEYS": { - "summary": "Get all the fields in a hash", - "complexity": "O(N) where N is the size of the hash.", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HLEN": { - "summary": "Get the number of fields in a hash", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HMGET": { - 
"summary": "Get the values of all the given hash fields", - "complexity": "O(N) where N is the number of fields being requested.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string", - "multiple": true - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HMSET": { - "summary": "Set multiple hash fields to multiple values", - "complexity": "O(N) where N is the number of fields being set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": [ - "field", - "value" - ], - "type": [ - "string", - "string" - ], - "multiple": true - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HSET": { - "summary": "Set the string value of a hash field", - "complexity": "O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": [ - "field", - "value" - ], - "type": [ - "string", - "string" - ], - "multiple": true - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HSETNX": { - "summary": "Set the value of a hash field, only if the field does not exist", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "HRANDFIELD": { - "summary": "Get one or multiple random fields from a hash", - "complexity": "O(N) where N is the number of fields returned", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "options", - "type": "block", - "block": [ - { - "name": "count", - "type": "integer" - }, - { - "name": "withvalues", - "type": "enum", - "enum": [ - "WITHVALUES" + [ + "6.0.0", + "Added the `AUTH2` option." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@slow", + "@dangerous" + ], + "arity": -6, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 3 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "keyword", + "spec": { + "keyword": "KEYS", + "startfrom": -2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true, + "incomplete": true + } + ], + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "key_or_empty_string", + "type": "oneof", + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "empty_string", + "type": "pure-token", + "token": "" + } + ] + }, + { + "name": "destination-db", + "type": "integer" + }, + { + "name": "timeout", + "type": "integer" + }, + { + "name": "copy", + "type": "pure-token", + "token": "COPY", + "since": "3.0.0", + "optional": true + }, + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE", + "since": "3.0.0", + "optional": true + }, + { + "name": "authentication", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "password", + "type": "string", + "token": "AUTH", + "since": "4.0.7", + "optional": true + }, + { + "name": "username_password", + "type": "block", + "token": "AUTH2", + "since": "6.0.0", + "optional": true, + "arguments": [ + { + "name": "username", + "type": "string" + }, + { + "name": "password", + "type": "string" + } + ] + } + ] + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "token": "KEYS", + "since": "3.0.6", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "write", + "movablekeys" + ], + "hints": [ + "nondeterministic_output" + ] + }, + 
"MODULE": { + "summary": "A container for module commands", + "since": "4.0.0", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "MODULE HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "MODULE LIST": { + "summary": "List all modules loaded by the server", + "since": "4.0.0", + "group": "server", + "complexity": "O(N) where N is the number of loaded modules.", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "noscript" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "MODULE LOAD": { + "summary": "Load a module", + "since": "4.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "path", + "type": "string" + }, + { + "name": "arg", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "admin", + "noscript", + "no_async_loading" + ] + }, + "MODULE LOADEX": { + "summary": "Load a module with extended parameters", + "since": "7.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "path", + "type": "string" + }, + { + "name": "configs", + "type": "block", + "token": "CONFIG", + "optional": true, + "multiple": true, + "multiple_token": true, + "arguments": [ + { + "name": "name", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + }, + { + "name": "args", + "type": "block", + "token": "ARGS", + "optional": true, + "multiple": true, + "arguments": [ + { + "name": "arg", + "type": "string" + } + ] + } + ], + "command_flags": [ + "admin", + "noscript", + 
"no_async_loading" + ] + }, + "MODULE UNLOAD": { + "summary": "Unload a module", + "since": "4.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "name", + "type": "string" + } + ], + "command_flags": [ + "admin", + "noscript", + "no_async_loading" + ] + }, + "MONITOR": { + "summary": "Listen for all requests received by the server in real time", + "since": "1.0.0", + "group": "server", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 1, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale" + ] + }, + "MOVE": { + "summary": "Move a key to another database", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "db", + "type": "integer" + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "MSET": { + "summary": "Set multiple keys to multiple values", + "since": "1.0.1", + "group": "string", + "complexity": "O(N) where N is the number of keys to set.", + "acl_categories": [ + "@write", + "@string", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 2, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } + 
], + "command_flags": [ + "write", + "denyoom" + ], + "hints": [ + "request_policy:multi_shard", + "response_policy:all_succeeded" + ] + }, + "MSETNX": { + "summary": "Set multiple keys to multiple values, only if none of the keys exist", + "since": "1.0.1", + "group": "string", + "complexity": "O(N) where N is the number of keys to set.", + "acl_categories": [ + "@write", + "@string", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 2, + "limit": 0 + } + }, + "OW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom" + ], + "hints": [ + "request_policy:multi_shard", + "response_policy:agg_min" + ] + }, + "MULTI": { + "summary": "Mark the start of a transaction block", + "since": "1.2.0", + "group": "transactions", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@transaction" + ], + "arity": 1, + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "allow_busy" + ] + }, + "OBJECT": { + "summary": "A container for object introspection commands", + "since": "2.2.3", + "group": "generic", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "OBJECT ENCODING": { + "summary": "Inspect the internal encoding of a Redis object", + "since": "2.2.3", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + 
"name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "OBJECT FREQ": { + "summary": "Get the logarithmic access frequency counter of a Redis object", + "since": "4.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "OBJECT HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "6.2.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "OBJECT IDLETIME": { + "summary": "Get the time since a Redis object was last accessed", + "since": "2.2.3", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "OBJECT REFCOUNT": { + "summary": "Get the number of references to the value of the key", + "since": "2.2.3", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + 
"index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "PERSIST": { + "summary": "Remove the expiration from a key", + "since": "2.2.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "PEXPIRE": { + "summary": "Set a key's time to live in milliseconds", + "since": "2.6.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "milliseconds", + "type": "integer" + }, + { + "name": "condition", + "type": "oneof", + "since": "7.0.0", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "PEXPIREAT": { + "summary": "Set the expiration for a key as a UNIX timestamp specified in milliseconds", + "since": "2.6.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added options: `NX`, `XX`, `GT` and `LT`." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "unix-time-milliseconds", + "type": "unix-time" + }, + { + "name": "condition", + "type": "oneof", + "since": "7.0.0", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + }, + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "PEXPIRETIME": { + "summary": "Get the expiration Unix timestamp for a key in milliseconds", + "since": "7.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "PFADD": { + "summary": "Adds the specified elements to the specified HyperLogLog.", + "since": "2.8.9", + "group": "hyperloglog", + "complexity": "O(1) to add every element.", + "acl_categories": [ + "@write", + "@hyperloglog", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true 
+ } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "PFCOUNT": { + "summary": "Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).", + "since": "2.8.9", + "group": "hyperloglog", + "complexity": "O(1) with a very small average constant time when called with a single key. O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys.", + "acl_categories": [ + "@read", + "@hyperloglog", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "notes": "RW because it may change the internal representation of the key, and propagate to replicas", + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "PFDEBUG": { + "summary": "Internal commands for debugging HyperLogLog values", + "since": "2.8.9", + "group": "hyperloglog", + "complexity": "N/A", + "acl_categories": [ + "@write", + "@hyperloglog", + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true + } + ], + "arguments": [ + { + "name": "subcommand", + "type": "string" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "write", + "denyoom", + "admin" + ], + "doc_flags": [ + "syscmd" + ] + }, + "PFMERGE": { + "summary": "Merge N different HyperLogLogs into a single one.", + 
"since": "2.8.9", + "group": "hyperloglog", + "complexity": "O(N) to merge N HyperLogLogs, but with high constant times.", + "acl_categories": [ + "@write", + "@hyperloglog", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "insert": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destkey", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "sourcekey", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "PFSELFTEST": { + "summary": "An internal command for testing HyperLogLog values", + "since": "2.8.9", + "group": "hyperloglog", + "complexity": "N/A", + "acl_categories": [ + "@hyperloglog", + "@admin", + "@slow", + "@dangerous" + ], + "arity": 1, + "command_flags": [ + "admin" + ], + "doc_flags": [ + "syscmd" + ] + }, + "PING": { + "summary": "Ping the server", + "since": "1.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": -1, + "arguments": [ + { + "name": "message", + "type": "string", + "optional": true + } + ], + "command_flags": [ + "fast" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:all_succeeded" + ] + }, + "PSETEX": { + "summary": "Set the value and expiration in milliseconds of a key", + "since": "2.6.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + 
"lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "milliseconds", + "type": "integer" + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "PSUBSCRIBE": { + "summary": "Listen for messages published to channels matching the given patterns", + "since": "2.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of patterns the client is already subscribed to.", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "pattern", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "pattern", + "type": "pattern" + } + ] + } + ], + "command_flags": [ + "pubsub", + "noscript", + "loading", + "stale" + ] + }, + "PSYNC": { + "summary": "Internal command used for replication", + "since": "2.8.0", + "group": "server", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -3, + "arguments": [ + { + "name": "replicationid", + "type": "string" + }, + { + "name": "offset", + "type": "integer" + } + ], + "command_flags": [ + "admin", + "noscript", + "no_async_loading", + "no_multi" + ] + }, + "PTTL": { + "summary": "Get the time to live for a key in milliseconds", + "since": "2.6.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "2.8.0", + "Added the -2 reply." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "PUBLISH": { + "summary": "Post a message to a channel", + "since": "2.0.0", + "group": "pubsub", + "complexity": "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client).", + "acl_categories": [ + "@pubsub", + "@fast" + ], + "arity": 3, + "arguments": [ + { + "name": "channel", + "type": "string" + }, + { + "name": "message", + "type": "string" + } + ], + "command_flags": [ + "pubsub", + "loading", + "stale", + "fast" + ] + }, + "PUBSUB": { + "summary": "A container for Pub/Sub commands", + "since": "2.8.0", + "group": "pubsub", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "PUBSUB CHANNELS": { + "summary": "List active channels", + "since": "2.8.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of active channels, and assuming constant time pattern matching (relatively short channels and patterns)", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "pattern", + "type": "pattern", + "optional": true + } + ], + "command_flags": [ + "pubsub", + "loading", + "stale" + ] + }, + "PUBSUB HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "6.2.0", + "group": "pubsub", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "PUBSUB NUMPAT": { + "summary": "Get the 
count of unique patterns pattern subscriptions", + "since": "2.8.0", + "group": "pubsub", + "complexity": "O(1)", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": 2, + "command_flags": [ + "pubsub", + "loading", + "stale" + ] + }, + "PUBSUB NUMSUB": { + "summary": "Get the count of subscribers for channels", + "since": "2.8.0", + "group": "pubsub", + "complexity": "O(N) for the NUMSUB subcommand, where N is the number of requested channels", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "channel", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "loading", + "stale" + ] + }, + "PUBSUB SHARDCHANNELS": { + "summary": "List active shard channels", + "since": "7.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short shard channels).", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "pattern", + "type": "pattern", + "optional": true + } + ], + "command_flags": [ + "pubsub", + "loading", + "stale" + ] + }, + "PUBSUB SHARDNUMSUB": { + "summary": "Get the count of subscribers for shard channels", + "since": "7.0.0", + "group": "pubsub", + "complexity": "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested shard channels", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "shardchannel", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "loading", + "stale" + ] + }, + "PUNSUBSCRIBE": { + "summary": "Stop listening for messages posted to channels matching the given patterns", + "since": "2.0.0", + "group": "pubsub", + "complexity": "O(N+M) where N is the number of patterns the client is already subscribed and M is the number of total patterns subscribed in the system (by any client).", + 
"acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -1, + "arguments": [ + { + "name": "pattern", + "type": "pattern", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "noscript", + "loading", + "stale" + ] + }, + "QUIT": { + "summary": "Close the connection", + "since": "1.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": -1, + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "no_auth", + "allow_busy" + ] + }, + "RANDOMKEY": { + "summary": "Return a random key from the keyspace", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": 1, + "command_flags": [ + "readonly" + ], + "hints": [ + "request_policy:all_shards", + "nondeterministic_output" + ] + }, + "READONLY": { + "summary": "Enables read queries for a connection to a cluster replica node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": 1, + "command_flags": [ + "loading", + "stale", + "fast" + ] + }, + "READWRITE": { + "summary": "Disables read queries for a connection to a cluster replica node", + "since": "3.0.0", + "group": "cluster", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": 1, + "command_flags": [ + "loading", + "stale", + "fast" + ] + }, + "RENAME": { + "summary": "Rename a key", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@write", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { 
+ "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "newkey", + "type": "key", + "key_spec_index": 1 + } + ], + "command_flags": [ + "write" + ] + }, + "RENAMENX": { + "summary": "Rename a key, only if the new key does not exist", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "3.2.0", + "The command no longer returns an error when source and destination names are the same." + ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "newkey", + "type": "key", + "key_spec_index": 1 + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "REPLCONF": { + "summary": "An internal command for configuring the replication stream", + "since": "3.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -1, + "command_flags": [ + "admin", + "noscript", + "loading", + "stale", + "allow_busy" + ], + "doc_flags": [ + "syscmd" + ] + }, + "REPLICAOF": { + "summary": "Make the server a replica of another instance, or promote it as master.", + "since": "5.0.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "host", + 
"type": "string" + }, + { + "name": "port", + "type": "integer" + } + ], + "command_flags": [ + "admin", + "noscript", + "stale", + "no_async_loading" + ] + }, + "RESET": { + "summary": "Reset the connection", + "since": "6.2.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": 1, + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "no_auth", + "allow_busy" + ] + }, + "RESTORE": { + "summary": "Create a key using the provided serialized value, previously obtained using DUMP.", + "since": "2.6.0", + "group": "generic", + "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", + "history": [ + [ + "3.0.0", + "Added the `REPLACE` modifier." 
], - "optional": true - } - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "hash" - }, - "HSTRLEN": { - "summary": "Get the length of the value of a hash field", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "field", - "type": "string" - } - ], - "since": "3.2.0", - "group": "hash" - }, - "HVALS": { - "summary": "Get all the values in a hash", - "complexity": "O(N) where N is the size of the hash.", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.0.0", - "group": "hash" - }, - "INCR": { - "summary": "Increment the integer value of a key by one", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "string" - }, - "INCRBY": { - "summary": "Increment the integer value of a key by the given amount", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "increment", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "string" - }, - "INCRBYFLOAT": { - "summary": "Increment the float value of a key by the given amount", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "increment", - "type": "double" - } - ], - "since": "2.6.0", - "group": "string" - }, - "INFO": { - "summary": "Get information and statistics about the server", - "arguments": [ - { - "name": "section", - "type": "string", - "optional": true - } - ], - "since": "1.0.0", - "group": "server" - }, - "LOLWUT": { - "summary": "Display some computer art and the Redis version", - "arguments": [ - { - "command": "VERSION", - "name": "version", - "type": "integer", - "optional": true - } - ], - "since": "5.0.0", - "group": "server" - }, - "KEYS": { - "summary": "Find all keys matching the given pattern", - "complexity": "O(N) with N being the number of keys in the database, under the assumption that the key names in the database and the given pattern have 
limited length.", - "arguments": [ - { - "name": "pattern", - "type": "pattern" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "LASTSAVE": { - "summary": "Get the UNIX time stamp of the last successful save to disk", - "since": "1.0.0", - "group": "server" - }, - "LINDEX": { - "summary": "Get an element from a list by its index", - "complexity": "O(N) where N is the number of elements to traverse to get to the element at index. This makes asking for the first or the last element of the list O(1).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "index", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "list" - }, - "LINSERT": { - "summary": "Insert an element before or after another element in a list", - "complexity": "O(N) where N is the number of elements to traverse before seeing the value pivot. This means that inserting somewhere on the left end on the list (head) can be considered O(1) and inserting somewhere on the right end (tail) is O(N).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "where", - "type": "enum", - "enum": [ - "BEFORE", - "AFTER" - ] - }, - { - "name": "pivot", - "type": "string" - }, - { - "name": "element", - "type": "string" - } - ], - "since": "2.2.0", - "group": "list" - }, - "LLEN": { - "summary": "Get the length of a list", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "list" - }, - "LPOP": { - "summary": "Remove and get the first elements in a list", - "complexity": "O(N) where N is the number of elements returned", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "1.0.0", - "group": "list" - }, - "LPOS": { - "summary": "Return the index of matching elements on a list", - "complexity": "O(N) where N is the number of elements in the list, for the average case. 
When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "element", - "type": "string" - }, - { - "command": "RANK", - "name": "rank", - "type": "integer", - "optional": true - }, - { - "command": "COUNT", - "name": "num-matches", - "type": "integer", - "optional": true - }, - { - "command": "MAXLEN", - "name": "len", - "type": "integer", - "optional": true - } - ], - "since": "6.0.6", - "group": "list" - }, - "LPUSH": { - "summary": "Prepend one or multiple elements to a list", - "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "element", - "type": "string", - "multiple": true - } - ], - "since": "1.0.0", - "group": "list" - }, - "LPUSHX": { - "summary": "Prepend an element to a list, only if the list exists", - "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "element", - "type": "string", - "multiple": true - } - ], - "since": "2.2.0", - "group": "list" - }, - "LRANGE": { - "summary": "Get a range of elements from a list", - "complexity": "O(S+N) where S is the distance of start offset from HEAD for small lists, from nearest end (HEAD or TAIL) for large lists; and N is the number of elements in the specified range.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "start", - "type": "integer" - }, - { - "name": "stop", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "list" - }, - "LREM": { - "summary": "Remove elements from a list", - "complexity": "O(N+M) where N is the length of the list and M is the number of elements removed.", - "arguments": [ - { - "name": "key", - 
"type": "key" - }, - { - "name": "count", - "type": "integer" - }, - { - "name": "element", - "type": "string" - } - ], - "since": "1.0.0", - "group": "list" - }, - "LSET": { - "summary": "Set the value of an element in a list by its index", - "complexity": "O(N) where N is the length of the list. Setting either the first or the last element of the list is O(1).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "index", - "type": "integer" - }, - { - "name": "element", - "type": "string" - } - ], - "since": "1.0.0", - "group": "list" - }, - "LTRIM": { - "summary": "Trim a list to the specified range", - "complexity": "O(N) where N is the number of elements to be removed by the operation.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "start", - "type": "integer" - }, - { - "name": "stop", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "list" - }, - "MEMORY DOCTOR": { - "summary": "Outputs memory problems report", - "since": "4.0.0", - "group": "server" - }, - "MEMORY HELP": { - "summary": "Show helpful text about the different subcommands", - "since": "4.0.0", - "group": "server" - }, - "MEMORY MALLOC-STATS": { - "summary": "Show allocator internal stats", - "since": "4.0.0", - "group": "server" - }, - "MEMORY PURGE": { - "summary": "Ask the allocator to release memory", - "since": "4.0.0", - "group": "server" - }, - "MEMORY STATS": { - "summary": "Show memory usage details", - "since": "4.0.0", - "group": "server" - }, - "MEMORY USAGE": { - "summary": "Estimate the memory usage of a key", - "complexity": "O(N) where N is the number of samples.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "command": "SAMPLES", - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "4.0.0", - "group": "server" - }, - "MGET": { - "summary": "Get the values of all the given keys", - "complexity": "O(N) where N is the number of keys to retrieve.", - "arguments": [ - { - 
"name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "string" - }, - "MIGRATE": { - "summary": "Atomically transfer a key from a Redis instance to another one.", - "complexity": "This command actually executes a DUMP+DEL in the source instance, and a RESTORE in the target instance. See the pages of these commands for time complexity. Also an O(N) data transfer between the two instances is performed.", - "arguments": [ - { - "name": "host", - "type": "string" - }, - { - "name": "port", - "type": "string" - }, - { - "name": "key", - "type": "enum", - "enum": [ - "key", - "\"\"" - ] - }, - { - "name": "destination-db", - "type": "integer" - }, - { - "name": "timeout", - "type": "integer" - }, - { - "name": "copy", - "type": "enum", - "enum": [ - "COPY" - ], - "optional": true - }, - { - "name": "replace", - "type": "enum", - "enum": [ - "REPLACE" - ], - "optional": true - }, - { - "command": "AUTH", - "name": "password", - "type": "string", - "optional": true - }, - { - "command": "AUTH2", - "name": "username password", - "type": "string", - "optional": true - }, - { - "name": "key", - "command": "KEYS", - "type": "key", - "variadic": true, - "optional": true - } - ], - "since": "2.6.0", - "group": "generic" - }, - "MODULE LIST": { - "summary": "List all modules loaded by the server", - "complexity": "O(N) where N is the number of loaded modules.", - "since": "4.0.0", - "group": "server" - }, - "MODULE LOAD": { - "summary": "Load a module", - "complexity": "O(1)", - "arguments": [ - { - "name": "path", - "type": "string" - }, - { - "name": "arg", - "type": "string", - "variadic": true, - "optional": true - } - ], - "since": "4.0.0", - "group": "server" - }, - "MODULE UNLOAD": { - "summary": "Unload a module", - "complexity": "O(1)", - "arguments": [ - { - "name": "name", - "type": "string" - } - ], - "since": "4.0.0", - "group": "server" - }, - "MONITOR": { - "summary": "Listen for all requests received by the server in real 
time", - "since": "1.0.0", - "group": "server" - }, - "MOVE": { - "summary": "Move a key to another database", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "db", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "MSET": { - "summary": "Set multiple keys to multiple values", - "complexity": "O(N) where N is the number of keys to set.", - "arguments": [ - { - "name": [ - "key", - "value" - ], - "type": [ - "key", - "string" - ], - "multiple": true - } - ], - "since": "1.0.1", - "group": "string" - }, - "MSETNX": { - "summary": "Set multiple keys to multiple values, only if none of the keys exist", - "complexity": "O(N) where N is the number of keys to set.", - "arguments": [ - { - "name": [ - "key", - "value" - ], - "type": [ - "key", - "string" - ], - "multiple": true - } - ], - "since": "1.0.1", - "group": "string" - }, - "MULTI": { - "summary": "Mark the start of a transaction block", - "since": "1.2.0", - "group": "transactions" - }, - "OBJECT": { - "summary": "Inspect the internals of Redis objects", - "complexity": "O(1) for all the currently implemented subcommands.", - "since": "2.2.3", - "group": "generic", - "arguments": [ - { - "name": "subcommand", - "type": "string" - }, - { - "name": "arguments", - "type": "string", - "optional": true, - "multiple": true - } - ] - }, - "PERSIST": { - "summary": "Remove the expiration from a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.2.0", - "group": "generic" - }, - "PEXPIRE": { - "summary": "Set a key's time to live in milliseconds", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "milliseconds", - "type": "integer" - } - ], - "since": "2.6.0", - "group": "generic" - }, - "PEXPIREAT": { - "summary": "Set the expiration for a key as a UNIX timestamp specified in milliseconds", - "complexity": "O(1)", - "arguments": [ - { - "name": 
"key", - "type": "key" - }, - { - "name": "milliseconds-timestamp", - "type": "posix time" - } - ], - "since": "2.6.0", - "group": "generic" - }, - "PEXPIRETIME": { - "summary": "Get the expiration Unix timestamp for a key in milliseconds", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "7.0.0", - "group": "generic" - }, - "PFADD": { - "summary": "Adds the specified elements to the specified HyperLogLog.", - "complexity": "O(1) to add every element.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "element", - "type": "string", - "optional": true, - "multiple": true - } - ], - "since": "2.8.9", - "group": "hyperloglog" - }, - "PFCOUNT": { - "summary": "Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s).", - "complexity": "O(1) with a very small average constant time when called with a single key. O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "2.8.9", - "group": "hyperloglog" - }, - "PFMERGE": { - "summary": "Merge N different HyperLogLogs into a single one.", - "complexity": "O(N) to merge N HyperLogLogs, but with high constant times.", - "arguments": [ - { - "name": "destkey", - "type": "key" - }, - { - "name": "sourcekey", - "type": "key", - "multiple": true - } - ], - "since": "2.8.9", - "group": "hyperloglog" - }, - "PING": { - "summary": "Ping the server", - "arguments": [ - { - "name": "message", - "type": "string", - "optional": true - } - ], - "since": "1.0.0", - "group": "connection" - }, - "PSETEX": { - "summary": "Set the value and expiration in milliseconds of a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "milliseconds", - "type": "integer" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "2.6.0", - "group": "string" 
- }, - "PSUBSCRIBE": { - "summary": "Listen for messages published to channels matching the given patterns", - "complexity": "O(N) where N is the number of patterns the client is already subscribed to.", - "arguments": [ - { - "name": [ - "pattern" - ], - "type": [ - "pattern" - ], - "multiple": true - } - ], - "since": "2.0.0", - "group": "pubsub" - }, - "PUBSUB": { - "summary": "Inspect the state of the Pub/Sub subsystem", - "complexity": "O(N) for the CHANNELS subcommand, where N is the number of active channels, and assuming constant time pattern matching (relatively short channels and patterns). O(N) for the NUMSUB subcommand, where N is the number of requested channels. O(1) for the NUMPAT subcommand.", - "arguments": [ - { - "name": "subcommand", - "type": "string" - }, - { - "name": "argument", - "type": "string", - "optional": true, - "multiple": true - } - ], - "since": "2.8.0", - "group": "pubsub" - }, - "PTTL": { - "summary": "Get the time to live for a key in milliseconds", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.6.0", - "group": "generic" - }, - "PUBLISH": { - "summary": "Post a message to a channel", - "complexity": "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client).", - "arguments": [ - { - "name": "channel", - "type": "string" - }, - { - "name": "message", - "type": "string" - } - ], - "since": "2.0.0", - "group": "pubsub" - }, - "PUNSUBSCRIBE": { - "summary": "Stop listening for messages posted to channels matching the given patterns", - "complexity": "O(N+M) where N is the number of patterns the client is already subscribed and M is the number of total patterns subscribed in the system (by any client).", - "arguments": [ - { - "name": "pattern", - "type": "pattern", - "optional": true, - "multiple": true - } - ], - "since": "2.0.0", - "group": "pubsub" - }, - "QUIT": { - "summary": "Close the 
connection", - "since": "1.0.0", - "group": "connection" - }, - "RANDOMKEY": { - "summary": "Return a random key from the keyspace", - "complexity": "O(1)", - "since": "1.0.0", - "group": "generic" - }, - "READONLY": { - "summary": "Enables read queries for a connection to a cluster replica node", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "READWRITE": { - "summary": "Disables read queries for a connection to a cluster replica node", - "complexity": "O(1)", - "since": "3.0.0", - "group": "cluster" - }, - "RENAME": { - "summary": "Rename a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "newkey", - "type": "key" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "RENAMENX": { - "summary": "Rename a key, only if the new key does not exist", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "newkey", - "type": "key" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "RESET": { - "summary": "Reset the connection", - "since": "6.2", - "group": "connection" - }, - "RESTORE": { - "summary": "Create a key using the provided serialized value, previously obtained using DUMP.", - "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "ttl", - "type": "integer" - }, - { - "name": "serialized-value", - "type": "string" - }, - { - "name": "replace", - "type": "enum", - "enum": [ - "REPLACE" - ], - "optional": true - }, - { - "name": "absttl", - "type": "enum", - "enum": [ - "ABSTTL" - ], - "optional": true - }, - { - "command": "IDLETIME", - "name": "seconds", - "type": "integer", - "optional": true - }, - { - "command": "FREQ", - "name": "frequency", - "type": "integer", - "optional": true - } - ], - "since": "2.6.0", - "group": "generic" - }, - "ROLE": { - "summary": "Return the role of the instance in the context of replication", - "since": "2.8.12", - "group": "server" - }, - "RPOP": { - "summary": "Remove and get the last elements in a list", - "complexity": "O(N) where N is the number of elements returned", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "1.0.0", - "group": "list" - }, - "RPOPLPUSH": { - "summary": "Remove the last element in a list, prepend it to another list and return it", - "complexity": "O(1)", - "arguments": [ - { - "name": "source", - "type": "key" - }, - { - "name": "destination", - "type": "key" - } - ], - "since": "1.2.0", - "group": "list" - }, - "LMOVE": { - "summary": "Pop an element from a list, push it to another list and return it", - "complexity": "O(1)", - "arguments": [ - { - "name": "source", - "type": "key" - }, - { - "name": "destination", - "type": "key" - }, - { - "name": "wherefrom", - "type": "enum", - "enum": [ - "LEFT", - "RIGHT" - ] - }, - { - "name": "whereto", - "type": "enum", - "enum": [ - "LEFT", - "RIGHT" - ] - } - ], - "since": "6.2.0", - "group": "list" - }, - "RPUSH": { - "summary": "Append one or multiple elements to a list", - "complexity": "O(1) for 
each element added, so O(N) to add N elements when the command is called with multiple arguments.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "element", - "type": "string", - "multiple": true - } - ], - "since": "1.0.0", - "group": "list" - }, - "RPUSHX": { - "summary": "Append an element to a list, only if the list exists", - "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "element", - "type": "string", - "multiple": true - } - ], - "since": "2.2.0", - "group": "list" - }, - "SADD": { - "summary": "Add one or more members to a set", - "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SAVE": { - "summary": "Synchronously save the dataset to disk", - "since": "1.0.0", - "group": "server" - }, - "SCARD": { - "summary": "Get the number of members in a set", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "set" - }, - "SCRIPT DEBUG": { - "summary": "Set the debug mode for executed scripts.", - "complexity": "O(1)", - "arguments": [ - { - "name": "mode", - "type": "enum", - "enum": [ - "YES", - "SYNC", - "NO" - ] - } - ], - "since": "3.2.0", - "group": "scripting" - }, - "SCRIPT EXISTS": { - "summary": "Check existence of scripts in the script cache.", - "complexity": "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation).", - "arguments": [ - { - "name": "sha1", - "type": "string", - "multiple": true - } - ], - "since": "2.6.0", - "group": "scripting" - }, - "SCRIPT FLUSH": { - "summary": "Remove all the scripts from the script 
cache.", - "arguments": [ - { - "name": "async", - "type": "enum", - "enum": [ - "ASYNC", - "SYNC" - ], - "optional": true - } - ], - "complexity": "O(N) with N being the number of scripts in cache", - "since": "2.6.0", - "group": "scripting" - }, - "SCRIPT KILL": { - "summary": "Kill the script currently in execution.", - "complexity": "O(1)", - "since": "2.6.0", - "group": "scripting" - }, - "SCRIPT LOAD": { - "summary": "Load the specified Lua script into the script cache.", - "complexity": "O(N) with N being the length in bytes of the script body.", - "arguments": [ - { - "name": "script", - "type": "string" - } - ], - "since": "2.6.0", - "group": "scripting" - }, - "SDIFF": { - "summary": "Subtract multiple sets", - "complexity": "O(N) where N is the total number of elements in all given sets.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SDIFFSTORE": { - "summary": "Subtract multiple sets and store the resulting set in a key", - "complexity": "O(N) where N is the total number of elements in all given sets.", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SELECT": { - "summary": "Change the selected database for the current connection", - "arguments": [ - { - "name": "index", - "type": "integer" - } - ], - "since": "1.0.0", - "group": "connection" - }, - "SET": { - "summary": "Set the string value of a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "value", - "type": "string" - }, - { - "name": "expiration", - "type": "enum", - "enum": [ - "EX seconds", - "PX milliseconds", - "EXAT timestamp", - "PXAT milliseconds-timestamp", - "KEEPTTL" - ], - "optional": true - }, - { - "name": "condition", - "type": "enum", - "enum": [ - "NX", - "XX" - ], - "optional": true - }, - { - "name": "get", - 
"type": "enum", - "enum": [ - "GET" - ], - "optional": true - } - ], - "since": "1.0.0", - "group": "string" - }, - "SETBIT": { - "summary": "Sets or clears the bit at offset in the string value stored at key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "offset", - "type": "integer" - }, - { - "name": "value", - "type": "integer" - } - ], - "since": "2.2.0", - "group": "bitmap" - }, - "SETEX": { - "summary": "Set the value and expiration of a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "seconds", - "type": "integer" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "2.0.0", - "group": "string" - }, - "SETNX": { - "summary": "Set the value of a key, only if the key does not exist", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "1.0.0", - "group": "string" - }, - "SETRANGE": { - "summary": "Overwrite part of a string at key starting at the specified offset", - "complexity": "O(1), not counting the time taken to copy the new string in place. Usually, this string is very small so the amortized complexity is O(1). 
Otherwise, complexity is O(M) with M being the length of the value argument.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "offset", - "type": "integer" - }, - { - "name": "value", - "type": "string" - } - ], - "since": "2.2.0", - "group": "string" - }, - "SHUTDOWN": { - "summary": "Synchronously save the dataset to disk and then shut down the server", - "arguments": [ - { - "name": "save-mode", - "type": "enum", - "enum": [ - "NOSAVE", - "SAVE" - ], - "optional": true - } - ], - "since": "1.0.0", - "group": "server" - }, - "SINTER": { - "summary": "Intersect multiple sets", - "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SINTERSTORE": { - "summary": "Intersect multiple sets and store the resulting set in a key", - "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SISMEMBER": { - "summary": "Determine if a given value is a member of a set", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string" - } - ], - "since": "1.0.0", - "group": "set" - }, - "SMISMEMBER": { - "summary": "Returns the membership associated with the given elements for a set", - "complexity": "O(N) where N is the number of elements being checked for membership", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - "multiple": true - } - ], - "since": "6.2.0", - "group": "set" - }, - "SLAVEOF": { - "summary": "Make the server a replica of another instance, or promote it as master. Deprecated starting with Redis 5. 
Use REPLICAOF instead.", - "arguments": [ - { - "name": "host", - "type": "string" - }, - { - "name": "port", - "type": "string" - } - ], - "since": "1.0.0", - "group": "server" - }, - "REPLICAOF": { - "summary": "Make the server a replica of another instance, or promote it as master.", - "arguments": [ - { - "name": "host", - "type": "string" - }, - { - "name": "port", - "type": "string" - } - ], - "since": "5.0.0", - "group": "server" - }, - "SLOWLOG": { - "summary": "Manages the Redis slow queries log", - "arguments": [ - { - "name": "subcommand", - "type": "string" - }, - { - "name": "argument", - "type": "string", - "optional": true - } - ], - "since": "2.2.12", - "group": "server" - }, - "SMEMBERS": { - "summary": "Get all the members in a set", - "complexity": "O(N) where N is the set cardinality.", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "set" - }, - "SMOVE": { - "summary": "Move a member from one set to another", - "complexity": "O(1)", - "arguments": [ - { - "name": "source", - "type": "key" - }, - { - "name": "destination", - "type": "key" - }, - { - "name": "member", - "type": "string" - } - ], - "since": "1.0.0", - "group": "set" - }, - "SORT": { - "summary": "Sort the elements in a list, set or sorted set", - "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is currently O(N) as there is a copy step that will be avoided in next releases.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "command": "BY", - "name": "pattern", - "type": "pattern", - "optional": true - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - }, - { - "command": "GET", - "name": "pattern", - "type": "string", - "optional": true, - "multiple": true - }, - { - "name": "order", - "type": "enum", - "enum": [ - "ASC", - "DESC" - ], - "optional": true - }, - { - "name": "sorting", - "type": "enum", - "enum": [ - "ALPHA" - ], - "optional": true - }, - { - "command": "STORE", - "name": "destination", - "type": "key", - "optional": true - } - ], - "since": "1.0.0", - "group": "generic" - }, - "SPOP": { - "summary": "Remove and return one or multiple random members from a set", - "complexity": "Without the count argument O(1), otherwise O(N) where N is the value of the passed count.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SRANDMEMBER": { - "summary": "Get one or multiple random members from a set", - "complexity": "Without the count argument O(1), otherwise O(N) where N is the absolute value of the passed count.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SREM": { - "summary": "Remove one or more members from a set", - "complexity": "O(N) where N is the number of members to be removed.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "STRALGO": { - "summary": "Run algorithms (currently LCS) against strings", - "complexity": 
"For LCS O(strlen(s1)*strlen(s2))", - "arguments": [ - { - "name": "algorithm", - "type": "enum", - "enum": [ - "LCS" - ] - }, - { - "name": "algo-specific-argument", - "type": "string", - "multiple": true - } - ], - "since": "6.0.0", - "group": "string" - }, - "STRLEN": { - "summary": "Get the length of the value stored in a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "2.2.0", - "group": "string" - }, - "SUBSCRIBE": { - "summary": "Listen for messages published to the given channels", - "complexity": "O(N) where N is the number of channels to subscribe to.", - "arguments": [ - { - "name": "channel", - "type": "string", - "multiple": true - } - ], - "since": "2.0.0", - "group": "pubsub" - }, - "SUNION": { - "summary": "Add multiple sets", - "complexity": "O(N) where N is the total number of elements in all given sets.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SUNIONSTORE": { - "summary": "Add multiple sets and store the resulting set in a key", - "complexity": "O(N) where N is the total number of elements in all given sets.", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "1.0.0", - "group": "set" - }, - "SWAPDB": { - "summary": "Swaps two Redis databases", - "complexity": "O(N) where N is the count of clients watching or blocking on keys from both databases.", - "arguments": [ - { - "name": "index1", - "type": "integer" - }, - { - "name": "index2", - "type": "integer" - } - ], - "since": "4.0.0", - "group": "server" - }, - "SYNC": { - "summary": "Internal command used for replication", - "since": "1.0.0", - "group": "server" - }, - "PSYNC": { - "summary": "Internal command used for replication", - "arguments": [ - { - "name": "replicationid", - "type": "integer" - }, - { - "name": "offset", - "type": "integer" - } - ], - 
"since": "2.8.0", - "group": "server" - }, - "TIME": { - "summary": "Return the current server time", - "complexity": "O(1)", - "since": "2.6.0", - "group": "server" - }, - "TOUCH": { - "summary": "Alters the last access time of a key(s). Returns the number of existing keys specified.", - "complexity": "O(N) where N is the number of keys that will be touched.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "3.2.1", - "group": "generic" - }, - "TTL": { - "summary": "Get the time to live for a key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "TYPE": { - "summary": "Determine the type stored at key", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "1.0.0", - "group": "generic" - }, - "UNSUBSCRIBE": { - "summary": "Stop listening for messages posted to the given channels", - "complexity": "O(N) where N is the number of clients already subscribed to a channel.", - "arguments": [ - { - "name": "channel", - "type": "string", - "optional": true, - "multiple": true - } - ], - "since": "2.0.0", - "group": "pubsub" - }, - "UNLINK": { - "summary": "Delete a key asynchronously in another thread. Otherwise it is just as DEL, but non blocking.", - "complexity": "O(1) for each key removed regardless of its size. 
Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "4.0.0", - "group": "generic" - }, - "UNWATCH": { - "summary": "Forget about all watched keys", - "complexity": "O(1)", - "since": "2.2.0", - "group": "transactions" - }, - "WAIT": { - "summary": "Wait for the synchronous replication of all the write commands sent in the context of the current connection", - "complexity": "O(1)", - "arguments": [ - { - "name": "numreplicas", - "type": "integer" - }, - { - "name": "timeout", - "type": "integer" - } - ], - "since": "3.0.0", - "group": "generic" - }, - "WATCH": { - "summary": "Watch the given keys to determine execution of the MULTI/EXEC block", - "complexity": "O(1) for every key.", - "arguments": [ - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "2.2.0", - "group": "transactions" - }, - "ZADD": { - "summary": "Add one or more members to a sorted set, or update its score if it already exists", - "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "condition", - "type": "enum", - "enum": [ - "NX", - "XX" - ], - "optional": true - }, - { - "name": "comparison", - "type": "enum", - "enum": [ - "GT", - "LT" - ], - "optional": true - }, - { - "name": "change", - "type": "enum", - "enum": [ - "CH" - ], - "optional": true - }, - { - "name": "increment", - "type": "enum", - "enum": [ - "INCR" - ], - "optional": true - }, - { - "name": [ - "score", - "member" - ], - "type": [ - "double", - "string" - ], - "multiple": true - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZCARD": { - "summary": "Get the number of members in a sorted set", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } 
- ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZCOUNT": { - "summary": "Count the members in a sorted set with scores within the given values", - "complexity": "O(log(N)) with N being the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "double" - }, - { - "name": "max", - "type": "double" - } - ], - "since": "2.0.0", - "group": "sorted_set" - }, - "ZDIFF": { - "summary": "Subtract multiple sorted sets", - "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", - "arguments": [ - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZDIFFSTORE": { - "summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key", - "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZINCRBY": { - "summary": "Increment the score of a member in a sorted set", - "complexity": "O(log(N)) where N is the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "increment", - "type": "integer" - }, - { - "name": "member", - "type": "string" - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZINTER": { - "summary": "Intersect multiple sorted sets", - "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted 
set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", - "arguments": [ - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "command": "WEIGHTS", - "name": "weight", - "type": "integer", - "variadic": true, - "optional": true - }, - { - "command": "AGGREGATE", - "name": "aggregate", - "type": "enum", - "enum": [ - "SUM", - "MIN", - "MAX" - ], - "optional": true - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZINTERSTORE": { - "summary": "Intersect multiple sorted sets and store the resulting sorted set in a new key", - "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "command": "WEIGHTS", - "name": "weight", - "type": "integer", - "variadic": true, - "optional": true - }, - { - "command": "AGGREGATE", - "name": "aggregate", - "type": "enum", - "enum": [ - "SUM", - "MIN", - "MAX" - ], - "optional": true - } - ], - "since": "2.0.0", - "group": "sorted_set" - }, - "ZLEXCOUNT": { - "summary": "Count the number of members in a sorted set between a given lexicographical range", - "complexity": "O(log(N)) with N being the number of elements in the sorted set.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "string" - }, - { - "name": "max", - "type": "string" - } - ], - "since": "2.8.9", - "group": "sorted_set" - }, - "ZPOPMAX": { - "summary": "Remove and return members with the highest scores in a sorted set", - "complexity": "O(log(N)*M) with N being the number of 
elements in the sorted set, and M being the number of elements popped.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "5.0.0", - "group": "sorted_set" - }, - "ZPOPMIN": { - "summary": "Remove and return members with the lowest scores in a sorted set", - "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "5.0.0", - "group": "sorted_set" - }, - "ZRANDMEMBER": { - "summary": "Get one or multiple random elements from a sorted set", - "complexity": "O(N) where N is the number of elements returned", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "options", - "type": "block", - "block": [ - { - "name": "count", - "type": "integer" - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" + [ + "5.0.0", + "Added the `ABSTTL` modifier." 
], - "optional": true - } - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZRANGESTORE": { - "summary": "Store a range of members from sorted set into another key", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key.", - "arguments": [ - { - "name": "dst", - "type": "key" - }, - { - "name": "src", - "type": "key" - }, - { - "name": "min", - "type": "string" - }, - { - "name": "max", - "type": "string" - }, - { - "name": "sortby", - "type": "enum", - "enum": [ - "BYSCORE", - "BYLEX" - ], - "optional": true - }, - { - "name": "rev", - "type": "enum", - "enum": [ - "REV" - ], - "optional": true - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZRANGE": { - "summary": "Return a range of members in a sorted set", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "string" - }, - { - "name": "max", - "type": "string" - }, - { - "name": "sortby", - "type": "enum", - "enum": [ - "BYSCORE", - "BYLEX" - ], - "optional": true - }, - { - "name": "rev", - "type": "enum", - "enum": [ - "REV" - ], - "optional": true - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZRANGEBYLEX": { - "summary": "Return a range of members in a sorted set, by lexicographical range", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "string" - }, - { - "name": "max", - "type": "string" - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - } - ], - "since": "2.8.9", - "group": "sorted_set" - }, - "ZREVRANGEBYLEX": { - "summary": "Return a range of members in a sorted set, by lexicographical range, ordered from higher to lower strings.", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "max", - "type": "string" - }, - { - "name": "min", - "type": "string" - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - } - ], - "since": "2.8.9", - "group": "sorted_set" - }, - "ZRANGEBYSCORE": { - "summary": "Return a range of members in a sorted set, by score", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "double" - }, - { - "name": "max", - "type": "double" - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - } - ], - "since": "1.0.5", - "group": "sorted_set" - }, - "ZRANK": { - "summary": "Determine the index of a member in a sorted set", - "complexity": "O(log(N))", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string" - } - ], - "since": "2.0.0", - "group": "sorted_set" - }, - "ZREM": { - "summary": "Remove one or more members from a sorted set", - "complexity": "O(M*log(N)) with N being the number of elements in the sorted set and M the number of elements to be removed.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - "multiple": true - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZREMRANGEBYLEX": { - "summary": "Remove all members in a sorted set between the given lexicographical range", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "string" - }, - { - "name": "max", - "type": "string" - } - ], - "since": "2.8.9", - "group": "sorted_set" - }, - "ZREMRANGEBYRANK": { - "summary": "Remove all members in a sorted set within the given indexes", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "start", - "type": "integer" - }, - { - "name": "stop", - 
"type": "integer" - } - ], - "since": "2.0.0", - "group": "sorted_set" - }, - "ZREMRANGEBYSCORE": { - "summary": "Remove all members in a sorted set within the given scores", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "min", - "type": "double" - }, - { - "name": "max", - "type": "double" - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZREVRANGE": { - "summary": "Return a range of members in a sorted set, by index, with scores ordered from high to low", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "start", - "type": "integer" - }, - { - "name": "stop", - "type": "integer" - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZREVRANGEBYSCORE": { - "summary": "Return a range of members in a sorted set, by score, with scores ordered from high to low", - "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "max", - "type": "double" - }, - { - "name": "min", - "type": "double" - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - }, - { - "command": "LIMIT", - "name": [ - "offset", - "count" - ], - "type": [ - "integer", - "integer" - ], - "optional": true - } - ], - "since": "2.2.0", - "group": "sorted_set" - }, - "ZREVRANK": { - "summary": "Determine the index of a member in a sorted set, with scores ordered from high to low", - "complexity": "O(log(N))", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string" - } - ], - "since": "2.0.0", - "group": "sorted_set" - }, - "ZSCORE": { - "summary": "Get the score associated with the given member in a sorted set", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string" - } - ], - "since": "1.2.0", - "group": "sorted_set" - }, - "ZUNION": { - "summary": "Add multiple sorted sets", - "complexity": "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", - "arguments": [ - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "command": "WEIGHTS", - "name": "weight", - "type": "integer", - "variadic": true, - "optional": true - }, - { - "command": "AGGREGATE", - "name": "aggregate", - "type": "enum", - "enum": [ - "SUM", - "MIN", - "MAX" - ], - "optional": true - }, - { - "name": "withscores", - "type": "enum", - "enum": [ - "WITHSCORES" - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZMSCORE": { - "summary": "Get the score associated with the given members in a sorted set", - "complexity": "O(N) where N is the number of 
members being requested.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "member", - "type": "string", - "multiple": true - } - ], - "since": "6.2.0", - "group": "sorted_set" - }, - "ZUNIONSTORE": { - "summary": "Add multiple sorted sets and store the resulting sorted set in a new key", - "complexity": "O(N)+O(M log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", - "arguments": [ - { - "name": "destination", - "type": "key" - }, - { - "name": "numkeys", - "type": "integer" - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "command": "WEIGHTS", - "name": "weight", - "type": "integer", - "variadic": true, - "optional": true - }, - { - "command": "AGGREGATE", - "name": "aggregate", - "type": "enum", - "enum": [ - "SUM", - "MIN", - "MAX" - ], - "optional": true - } - ], - "since": "2.0.0", - "group": "sorted_set" - }, - "SCAN": { - "summary": "Incrementally iterate the keys space", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.", - "arguments": [ - { - "name": "cursor", - "type": "integer" - }, - { - "command": "MATCH", - "name": "pattern", - "type": "pattern", - "optional": true - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - }, - { - "command": "TYPE", - "name": "type", - "type": "string", - "optional": true - } - ], - "since": "2.8.0", - "group": "generic" - }, - "SSCAN": { - "summary": "Incrementally iterate Set elements", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection..", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "cursor", - "type": "integer" - }, - { - "command": "MATCH", - "name": "pattern", - "type": "pattern", - "optional": true - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "2.8.0", - "group": "set" - }, - "HSCAN": { - "summary": "Incrementally iterate hash fields and associated values", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "cursor", - "type": "integer" - }, - { - "command": "MATCH", - "name": "pattern", - "type": "pattern", - "optional": true - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "2.8.0", - "group": "hash" - }, - "ZSCAN": { - "summary": "Incrementally iterate sorted sets elements and associated scores", - "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "cursor", - "type": "integer" - }, - { - "command": "MATCH", - "name": "pattern", - "type": "pattern", - "optional": true - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "2.8.0", - "group": "sorted_set" - }, - "XINFO": { - "summary": "Get information on streams and consumer groups", - "complexity": "O(N) with N being the number of returned items for the subcommands CONSUMERS and GROUPS. 
The STREAM subcommand is O(log N) with N being the number of items in the stream.", - "arguments": [ - { - "command": "CONSUMERS", - "name": [ - "key", - "groupname" - ], - "type": [ - "key", - "string" - ], - "optional": true - }, - { - "command": "GROUPS", - "name": "key", - "type": "key", - "optional": true - }, - { - "command": "STREAM", - "name": "key", - "type": "key", - "optional": true - }, - { - "name": "help", - "type": "enum", - "enum": [ - "HELP" - ], - "optional": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XADD": { - "summary": "Appends a new entry to a stream", - "complexity": "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "command": "NOMKSTREAM", - "optional": true - }, - { - "name": "trim", - "type": "block", - "optional": true, - "block": [ - { - "name": "strategy", - "type": "enum", - "enum": [ - "MAXLEN", - "MINID" + [ + "5.0.0", + "Added the `IDLETIME` and `FREQ` options." 
] - }, - { - "name": "operator", - "type": "enum", - "enum": [ - "=", - "~" + ], + "acl_categories": [ + "@keyspace", + "@write", + "@slow", + "@dangerous" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "ttl", + "type": "integer" + }, + { + "name": "serialized-value", + "type": "string" + }, + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE", + "since": "3.0.0", + "optional": true + }, + { + "name": "absttl", + "type": "pure-token", + "token": "ABSTTL", + "since": "5.0.0", + "optional": true + }, + { + "name": "seconds", + "type": "integer", + "token": "IDLETIME", + "since": "5.0.0", + "optional": true + }, + { + "name": "frequency", + "type": "integer", + "token": "FREQ", + "since": "5.0.0", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "RESTORE-ASKING": { + "summary": "An internal command for migrating keys in a cluster", + "since": "3.0.0", + "group": "server", + "complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).", + "history": [ + [ + "3.0.0", + "Added the `REPLACE` modifier." 
], - "optional": true - }, - { - "name": "threshold", - "type": "string" - }, - { - "command": "LIMIT", - "name": "count", - "type": "integer", - "optional": true - } - ] - }, - { - "type": "enum", - "enum": [ - "*", - "ID" - ] - }, - { - "name": [ - "field", - "value" - ], - "type": [ - "string", - "string" - ], - "multiple": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XTRIM": { - "summary": "Trims the stream to (approximately if '~' is passed) a certain size", - "complexity": "O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "trim", - "type": "block", - "block": [ - { - "name": "strategy", - "type": "enum", - "enum": [ - "MAXLEN", - "MINID" + [ + "5.0.0", + "Added the `ABSTTL` modifier." + ], + [ + "5.0.0", + "Added the `IDLETIME` and `FREQ` options." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@write", + "@slow", + "@dangerous" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "ttl", + "type": "integer" + }, + { + "name": "serialized-value", + "type": "string" + }, + { + "name": "replace", + "type": "pure-token", + "token": "REPLACE", + "since": "3.0.0", + "optional": true + }, + { + "name": "absttl", + "type": "pure-token", + "token": "ABSTTL", + "since": "5.0.0", + "optional": true + }, + { + "name": "seconds", + "type": "integer", + "token": "IDLETIME", + "since": "5.0.0", + "optional": true + }, + { + "name": "frequency", + "type": "integer", + "token": "FREQ", + "since": "5.0.0", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom", + "asking" + ], + "doc_flags": [ + "syscmd" + ] + }, + "ROLE": { + "summary": "Return the role of the instance in the context of replication", + "since": "2.8.12", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@fast", + "@dangerous" + ], + "arity": 1, + "command_flags": [ + "noscript", + "loading", + "stale", + "fast" + ] + }, + "RPOP": { + "summary": "Remove and get the last elements in a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(N) where N is the number of elements returned", + "history": [ + [ + "6.2.0", + "Added the `count` argument." 
+ ] + ], + "acl_categories": [ + "@write", + "@list", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "since": "6.2.0", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "RPOPLPUSH": { + "summary": "Remove the last element in a list, prepend it to another list and return it", + "since": "1.2.0", + "group": "list", + "complexity": "O(1)", + "deprecated_since": "6.2.0", + "replaced_by": "`LMOVE` with the `RIGHT` and `LEFT` arguments", + "acl_categories": [ + "@write", + "@list", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + } + ], + "command_flags": [ + "write", + "denyoom" + ], + "doc_flags": [ + "deprecated" + ] + }, + "RPUSH": { + "summary": "Append one or multiple elements to a list", + "since": "1.0.0", + "group": "list", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "history": [ + [ + "2.4.0", + "Accepts multiple `element` arguments." 
+ ] + ], + "acl_categories": [ + "@write", + "@list", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "RPUSHX": { + "summary": "Append an element to a list, only if the list exists", + "since": "2.2.0", + "group": "list", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "history": [ + [ + "4.0.0", + "Accepts multiple `element` arguments." + ] + ], + "acl_categories": [ + "@write", + "@list", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "element", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "SADD": { + "summary": "Add one or more members to a set", + "since": "1.0.0", + "group": "set", + "complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.", + "history": [ + [ + "2.4.0", + "Accepts multiple `member` arguments." 
+ ] + ], + "acl_categories": [ + "@write", + "@set", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "SAVE": { + "summary": "Synchronously save the dataset to disk", + "since": "1.0.0", + "group": "server", + "complexity": "O(N) where N is the total number of keys in all databases", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 1, + "command_flags": [ + "admin", + "noscript", + "no_async_loading", + "no_multi" + ] + }, + "SCAN": { + "summary": "Incrementally iterate the keys space", + "since": "2.8.0", + "group": "generic", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection.", + "history": [ + [ + "6.0.0", + "Added the `TYPE` subcommand." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@read", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "cursor", + "type": "integer" + }, + { + "name": "pattern", + "type": "pattern", + "token": "MATCH", + "optional": true + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + }, + { + "name": "type", + "type": "string", + "token": "TYPE", + "since": "6.0.0", + "optional": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output", + "request_policy:special" + ] + }, + "SCARD": { + "summary": "Get the number of members in a set", + "since": "1.0.0", + "group": "set", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@set", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "SCRIPT": { + "summary": "A container for Lua scripts management commands", + "since": "2.6.0", + "group": "scripting", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "SCRIPT DEBUG": { + "summary": "Set the debug mode for executed scripts.", + "since": "3.2.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 3, + "arguments": [ + { + "name": "mode", + "type": "oneof", + "arguments": [ + { + "name": "yes", + "type": "pure-token", + "token": "YES" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC" + }, + { + "name": "no", + "type": "pure-token", + "token": "NO" + } + ] + } + ], + "command_flags": [ + "noscript" + ] + }, + "SCRIPT EXISTS": { + "summary": "Check existence of scripts in the script cache.", + "since": "2.6.0", + "group": 
"scripting", + "complexity": "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation).", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -3, + "arguments": [ + { + "name": "sha1", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "noscript" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:agg_logical_and" + ] + }, + "SCRIPT FLUSH": { + "summary": "Remove all the scripts from the script cache.", + "since": "2.6.0", + "group": "scripting", + "complexity": "O(N) with N being the number of scripts in cache", + "history": [ + [ + "6.2.0", + "Added the `ASYNC` and `SYNC` flushing mode modifiers." + ] + ], + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": -2, + "arguments": [ + { + "name": "async", + "type": "oneof", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "async", + "type": "pure-token", + "token": "ASYNC" + }, + { + "name": "sync", + "type": "pure-token", + "token": "SYNC" + } + ] + } + ], + "command_flags": [ + "noscript" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" + ] + }, + "SCRIPT HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "SCRIPT KILL": { + "summary": "Kill the script currently in execution.", + "since": "2.6.0", + "group": "scripting", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 2, + "command_flags": [ + "noscript", + "allow_busy" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:one_succeeded" + ] + }, + "SCRIPT LOAD": { + "summary": "Load the specified Lua script into the script cache.", + "since": "2.6.0", + "group": "scripting", + "complexity": "O(N) with N being the length in bytes of the script 
body.", + "acl_categories": [ + "@slow", + "@scripting" + ], + "arity": 3, + "arguments": [ + { + "name": "script", + "type": "string" + } + ], + "command_flags": [ + "noscript", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" + ] + }, + "SDIFF": { + "summary": "Subtract multiple sets", + "since": "1.0.0", + "group": "set", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "SDIFFSTORE": { + "summary": "Subtract multiple sets and store the resulting set in a key", + "since": "1.0.0", + "group": "set", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "acl_categories": [ + "@write", + "@set", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SELECT": { + "summary": "Change the selected database for 
the current connection", + "since": "1.0.0", + "group": "connection", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@connection" + ], + "arity": 2, + "arguments": [ + { + "name": "index", + "type": "integer" + } + ], + "command_flags": [ + "loading", + "stale", + "fast" + ] + }, + "SET": { + "summary": "Set the string value of a key", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "history": [ + [ + "2.6.12", + "Added the `EX`, `PX`, `NX` and `XX` options." + ], + [ + "6.0.0", + "Added the `KEEPTTL` option." + ], + [ + "6.2.0", + "Added the `GET`, `EXAT` and `PXAT` option." + ], + [ + "7.0.0", + "Allowed the `NX` and `GET` options to be used together." + ] + ], + "acl_categories": [ + "@write", + "@string", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "notes": "RW and ACCESS due to the optional `GET` argument", + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true, + "variable_flags": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + }, + { + "name": "condition", + "type": "oneof", + "since": "2.6.12", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + } + ] + }, + { + "name": "get", + "type": "pure-token", + "token": "GET", + "since": "6.2.0", + "optional": true + }, + { + "name": "expiration", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "seconds", + "type": "integer", + "token": "EX", + "since": "2.6.12" + }, + { + "name": "milliseconds", + "type": "integer", + "token": "PX", + "since": "2.6.12" + }, + { + "name": "unix-time-seconds", + "type": "unix-time", + "token": "EXAT", + "since": "6.2.0" + }, + { + "name": 
"unix-time-milliseconds", + "type": "unix-time", + "token": "PXAT", + "since": "6.2.0" + }, + { + "name": "keepttl", + "type": "pure-token", + "token": "KEEPTTL", + "since": "6.0.0" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SETBIT": { + "summary": "Sets or clears the bit at offset in the string value stored at key", + "since": "2.2.0", + "group": "bitmap", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@bitmap", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "value", + "type": "integer" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SETEX": { + "summary": "Set the value and expiration of a key", + "since": "2.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "seconds", + "type": "integer" + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SETNX": { + "summary": "Set the value of a key, only if the key does not exist", + "since": "1.0.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@string", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + 
"type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "SETRANGE": { + "summary": "Overwrite part of a string at key starting at the specified offset", + "since": "2.2.0", + "group": "string", + "complexity": "O(1), not counting the time taken to copy the new string in place. Usually, this string is very small so the amortized complexity is O(1). Otherwise, complexity is O(M) with M being the length of the value argument.", + "acl_categories": [ + "@write", + "@string", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "offset", + "type": "integer" + }, + { + "name": "value", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SHUTDOWN": { + "summary": "Synchronously save the dataset to disk and then shut down the server", + "since": "1.0.0", + "group": "server", + "complexity": "O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)", + "history": [ + [ + "7.0.0", + "Added the `NOW`, `FORCE` and `ABORT` modifiers." 
+ ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -1, + "arguments": [ + { + "name": "nosave_save", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "nosave", + "type": "pure-token", + "token": "NOSAVE" + }, + { + "name": "save", + "type": "pure-token", + "token": "SAVE" + } + ] + }, + { + "name": "now", + "type": "pure-token", + "token": "NOW", + "since": "7.0.0", + "optional": true + }, + { + "name": "force", + "type": "pure-token", + "token": "FORCE", + "since": "7.0.0", + "optional": true + }, + { + "name": "abort", + "type": "pure-token", + "token": "ABORT", + "since": "7.0.0", + "optional": true + } + ], + "command_flags": [ + "admin", + "noscript", + "loading", + "stale", + "no_multi", + "allow_busy" + ] + }, + "SINTER": { + "summary": "Intersect multiple sets", + "since": "1.0.0", + "group": "set", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "SINTERCARD": { + "summary": "Intersect multiple sets and return the cardinality of the result", + "since": "7.0.0", + "group": "set", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, 
+ "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "limit", + "type": "integer", + "token": "LIMIT", + "optional": true + } + ], + "command_flags": [ + "readonly", + "movablekeys" + ] + }, + "SINTERSTORE": { + "summary": "Intersect multiple sets and store the resulting set in a key", + "since": "1.0.0", + "group": "set", + "complexity": "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets.", + "acl_categories": [ + "@write", + "@set", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SISMEMBER": { + "summary": "Determine if a given value is a member of a set", + "since": "1.0.0", + "group": "set", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@set", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + 
"SLAVEOF": { + "summary": "Make the server a replica of another instance, or promote it as master.", + "since": "1.0.0", + "group": "server", + "complexity": "O(1)", + "deprecated_since": "5.0.0", + "replaced_by": "`REPLICAOF`", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "host", + "type": "string" + }, + { + "name": "port", + "type": "integer" + } + ], + "command_flags": [ + "admin", + "noscript", + "stale", + "no_async_loading" + ], + "doc_flags": [ + "deprecated" + ] + }, + "SLOWLOG": { + "summary": "A container for slow log commands", + "since": "2.2.12", + "group": "server", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "SLOWLOG GET": { + "summary": "Get the slow log's entries", + "since": "2.2.12", + "group": "server", + "complexity": "O(N) where N is the number of entries returned", + "history": [ + [ + "4.0.0", + "Added client IP address, port and name to the reply." 
+ ] + ], + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": -2, + "arguments": [ + { + "name": "count", + "type": "integer", + "optional": true + } + ], + "command_flags": [ + "admin", + "loading", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "nondeterministic_output" + ] + }, + "SLOWLOG HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "6.2.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "SLOWLOG LEN": { + "summary": "Get the slow log's length", + "since": "2.2.12", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "loading", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:agg_sum", + "nondeterministic_output" + ] + }, + "SLOWLOG RESET": { + "summary": "Clear all entries from the slow log", + "since": "2.2.12", + "group": "server", + "complexity": "O(N) where N is the number of entries in the slowlog", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 2, + "command_flags": [ + "admin", + "loading", + "stale" + ], + "hints": [ + "request_policy:all_nodes", + "response_policy:all_succeeded" + ] + }, + "SMEMBERS": { + "summary": "Get all the members in a set", + "since": "1.0.0", + "group": "set", + "complexity": "O(N) where N is the set cardinality.", + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] 
+ }, + "SMISMEMBER": { + "summary": "Returns the membership associated with the given elements for a set", + "since": "6.2.0", + "group": "set", + "complexity": "O(N) where N is the number of elements being checked for membership", + "acl_categories": [ + "@read", + "@set", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "SMOVE": { + "summary": "Move a member from one set to another", + "since": "1.0.0", + "group": "set", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@set", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "source", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "member", + "type": "string" + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "SORT": { + "summary": "Sort the elements in a list, set or sorted set", + "since": "1.0.0", + "group": "generic", + "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N).", + "acl_categories": [ + "@write", + "@set", + "@sortedset", + "@list", + "@slow", + "@dangerous" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + }, + { + "notes": "For the optional BY/GET keyword. It is marked 'unknown' because the key names derive from the content of the key we sort", + "begin_search": { + "type": "unknown", + "spec": {} + }, + "find_keys": { + "type": "unknown", + "spec": {} + }, + "RO": true, + "access": true + }, + { + "notes": "For the optional STORE keyword. It is marked 'unknown' because the keyword can appear anywhere in the argument array", + "begin_search": { + "type": "unknown", + "spec": {} + }, + "find_keys": { + "type": "unknown", + "spec": {} + }, + "OW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "pattern", + "type": "pattern", + "key_spec_index": 1, + "token": "BY", + "optional": true + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + }, + { + "name": "pattern", + "type": "pattern", + "key_spec_index": 1, + "token": "GET", + "optional": true, + "multiple": true, + "multiple_token": true + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "sorting", + "type": "pure-token", + "token": "ALPHA", + "optional": true + }, + { + "name": "destination", + "type": "key", + "key_spec_index": 2, + "token": "STORE", + "optional": true + } + ], + "command_flags": [ 
+ "write", + "denyoom", + "movablekeys" + ] + }, + "SORT_RO": { + "summary": "Sort the elements in a list, set or sorted set. Read-only variant of SORT.", + "since": "7.0.0", + "group": "generic", + "complexity": "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. When the elements are not sorted, complexity is O(N).", + "acl_categories": [ + "@read", + "@set", + "@sortedset", + "@list", + "@slow", + "@dangerous" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + }, + { + "notes": "For the optional BY/GET keyword. It is marked 'unknown' because the key names derive from the content of the key we sort", + "begin_search": { + "type": "unknown", + "spec": {} + }, + "find_keys": { + "type": "unknown", + "spec": {} + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "pattern", + "type": "pattern", + "key_spec_index": 1, + "token": "BY", + "optional": true + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + }, + { + "name": "pattern", + "type": "pattern", + "key_spec_index": 1, + "token": "GET", + "optional": true, + "multiple": true, + "multiple_token": true + }, + { + "name": "order", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "asc", + "type": "pure-token", + "token": "ASC" + }, + { + "name": "desc", + "type": "pure-token", + "token": "DESC" + } + ] + }, + { + "name": "sorting", + "type": "pure-token", + "token": "ALPHA", + "optional": true + } + ], + "command_flags": [ + "readonly", + "movablekeys" + ] + }, + "SPOP": { + "summary": "Remove 
and return one or multiple random members from a set", + "since": "1.0.0", + "group": "set", + "complexity": "Without the count argument O(1), otherwise O(N) where N is the value of the passed count.", + "history": [ + [ + "3.2.0", + "Added the `count` argument." ] - }, - { - "name": "operator", - "type": "enum", - "enum": [ - "=", - "~" + ], + "acl_categories": [ + "@write", + "@set", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "since": "3.2.0", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "SPUBLISH": { + "summary": "Post a message to a shard channel", + "since": "7.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of clients subscribed to the receiving shard channel.", + "acl_categories": [ + "@pubsub", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "not_key": true + } + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string" + }, + { + "name": "message", + "type": "string" + } + ], + "command_flags": [ + "pubsub", + "loading", + "stale", + "fast" + ] + }, + "SRANDMEMBER": { + "summary": "Get one or multiple random members from a set", + "since": "1.0.0", + "group": "set", + "complexity": "Without the count argument O(1), otherwise O(N) where N is the absolute value of the passed count.", + "history": [ + [ + "2.6.0", + "Added the optional `count` argument." 
+ ] + ], + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "since": "2.6.0", + "optional": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "SREM": { + "summary": "Remove one or more members from a set", + "since": "1.0.0", + "group": "set", + "complexity": "O(N) where N is the number of members to be removed.", + "history": [ + [ + "2.4.0", + "Accepts multiple `member` arguments." + ] + ], + "acl_categories": [ + "@write", + "@set", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "SSCAN": { + "summary": "Incrementally iterate Set elements", + "since": "2.8.0", + "group": "set", + "complexity": "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection..", + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "cursor", + "type": "integer" + }, + { + "name": "pattern", + "type": "pattern", + "token": "MATCH", + "optional": true + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "SSUBSCRIBE": { + "summary": "Listen for messages published to the given shard channels", + "since": "7.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of shard channels to subscribe to.", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "not_key": true + } + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "noscript", + "loading", + "stale" + ] + }, + "STRLEN": { + "summary": "Get the length of the value stored in a key", + "since": "2.2.0", + "group": "string", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@string", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + 
"fast" + ] + }, + "SUBSCRIBE": { + "summary": "Listen for messages published to the given channels", + "since": "2.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of channels to subscribe to.", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -2, + "arguments": [ + { + "name": "channel", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "noscript", + "loading", + "stale" + ] + }, + "SUBSTR": { + "summary": "Get a substring of the string stored at a key", + "since": "1.0.0", + "group": "string", + "complexity": "O(N) where N is the length of the returned string. The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings.", + "deprecated_since": "2.0.0", + "replaced_by": "`GETRANGE`", + "acl_categories": [ + "@read", + "@string", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "end", + "type": "integer" + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "SUNION": { + "summary": "Add multiple sets", + "since": "1.0.0", + "group": "set", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "acl_categories": [ + "@read", + "@set", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + 
"key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output_order" + ] + }, + "SUNIONSTORE": { + "summary": "Add multiple sets and store the resulting set in a key", + "since": "1.0.0", + "group": "set", + "complexity": "O(N) where N is the total number of elements in all given sets.", + "acl_categories": [ + "@write", + "@set", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "SUNSUBSCRIBE": { + "summary": "Stop listening for messages posted to the given shard channels", + "since": "7.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of clients already subscribed to a shard channel.", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -1, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "not_key": true + } + ], + "arguments": [ + { + "name": "shardchannel", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "noscript", + "loading", + "stale" + ] + }, + "SWAPDB": { + "summary": "Swaps two Redis databases", + "since": "4.0.0", + "group": "server", + "complexity": "O(N) where N is the count of clients watching or 
blocking on keys from both databases.", + "acl_categories": [ + "@keyspace", + "@write", + "@fast", + "@dangerous" + ], + "arity": 3, + "arguments": [ + { + "name": "index1", + "type": "integer" + }, + { + "name": "index2", + "type": "integer" + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "SYNC": { + "summary": "Internal command used for replication", + "since": "1.0.0", + "group": "server", + "acl_categories": [ + "@admin", + "@slow", + "@dangerous" + ], + "arity": 1, + "command_flags": [ + "admin", + "noscript", + "no_async_loading", + "no_multi" + ] + }, + "TIME": { + "summary": "Return the current server time", + "since": "2.6.0", + "group": "server", + "complexity": "O(1)", + "acl_categories": [ + "@fast" + ], + "arity": 1, + "command_flags": [ + "loading", + "stale", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "TOUCH": { + "summary": "Alters the last access time of a key(s). Returns the number of existing keys specified.", + "since": "3.2.1", + "group": "generic", + "complexity": "O(N) where N is the number of keys that will be touched.", + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "readonly", + "fast" + ], + "hints": [ + "request_policy:multi_shard", + "response_policy:agg_sum" + ] + }, + "TTL": { + "summary": "Get the time to live for a key in seconds", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "history": [ + [ + "2.8.0", + "Added the -2 reply." 
+ ] + ], + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "TYPE": { + "summary": "Determine the type stored at key", + "since": "1.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@keyspace", + "@read", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "UNLINK": { + "summary": "Delete a key asynchronously in another thread. Otherwise it is just as DEL, but non blocking.", + "since": "4.0.0", + "group": "generic", + "complexity": "O(1) for each key removed regardless of its size. 
Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.", + "acl_categories": [ + "@keyspace", + "@write", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RM": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "write", + "fast" + ], + "hints": [ + "request_policy:multi_shard", + "response_policy:agg_sum" + ] + }, + "UNSUBSCRIBE": { + "summary": "Stop listening for messages posted to the given channels", + "since": "2.0.0", + "group": "pubsub", + "complexity": "O(N) where N is the number of clients already subscribed to a channel.", + "acl_categories": [ + "@pubsub", + "@slow" + ], + "arity": -1, + "arguments": [ + { + "name": "channel", + "type": "string", + "optional": true, + "multiple": true + } + ], + "command_flags": [ + "pubsub", + "noscript", + "loading", + "stale" + ] + }, + "UNWATCH": { + "summary": "Forget about all watched keys", + "since": "2.2.0", + "group": "transactions", + "complexity": "O(1)", + "acl_categories": [ + "@fast", + "@transaction" + ], + "arity": 1, + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "allow_busy" + ] + }, + "WAIT": { + "summary": "Wait for the synchronous replication of all the write commands sent in the context of the current connection", + "since": "3.0.0", + "group": "generic", + "complexity": "O(1)", + "acl_categories": [ + "@slow", + "@connection" + ], + "arity": 3, + "arguments": [ + { + "name": "numreplicas", + "type": "integer" + }, + { + "name": "timeout", + "type": "integer" + } + ], + "command_flags": [ + "noscript" + ], + "hints": [ + "request_policy:all_shards", + "response_policy:agg_min" + ] + }, + 
"WATCH": { + "summary": "Watch the given keys to determine execution of the MULTI/EXEC block", + "since": "2.2.0", + "group": "transactions", + "complexity": "O(1) for every key.", + "acl_categories": [ + "@fast", + "@transaction" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + } + ], + "command_flags": [ + "noscript", + "loading", + "stale", + "fast", + "allow_busy" + ] + }, + "XACK": { + "summary": "Marks a pending message as correctly processed, effectively removing it from the pending entries list of the consumer group. Return value of the command is the number of messages successfully acknowledged, that is, the IDs we were actually able to resolve in the PEL.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1) for each message ID processed.", + "acl_categories": [ + "@write", + "@stream", + "@fast" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "id", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "XADD": { + "summary": "Appends a new entry to a stream", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted.", + "history": [ + [ + "6.2.0", + "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option." 
], - "optional": true - }, - { - "name": "threshold", - "type": "string" - }, - { - "command": "LIMIT", - "name": "count", - "type": "integer", - "optional": true - } - ] - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XDEL": { - "summary": "Removes the specified entries from the stream. Returns the number of items actually deleted, that may be different from the number of IDs passed in case certain IDs do not exist.", - "complexity": "O(1) for each single item to delete in the stream, regardless of the stream size.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "ID", - "type": "string", - "multiple": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XRANGE": { - "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval", - "complexity": "O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "start", - "type": "string" - }, - { - "name": "end", - "type": "string" - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XREVRANGE": { - "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval, in reverse order (from greater to smaller IDs) compared to XRANGE", - "complexity": "O(N) with N being the number of elements returned. If N is constant (e.g. 
always asking for the first 10 elements with COUNT), you can consider it O(1).", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "end", - "type": "string" - }, - { - "name": "start", - "type": "string" - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XLEN": { - "summary": "Return the number of entries in a stream", - "complexity": "O(1)", - "arguments": [ - { - "name": "key", - "type": "key" - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XREAD": { - "summary": "Return never seen elements in multiple streams, with IDs greater than the ones reported by the caller for each stream. Can block.", - "complexity": "For each stream mentioned: O(N) with N being the number of elements being returned, it means that XREAD-ing with a fixed COUNT is O(1). Note that when the BLOCK option is used, XADD will pay O(M) time in order to serve the M clients blocked on the stream getting new data.", - "arguments": [ - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - }, - { - "command": "BLOCK", - "name": "milliseconds", - "type": "integer", - "optional": true - }, - { - "name": "streams", - "type": "enum", - "enum": [ - "STREAMS" - ] - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "ID", - "type": "string", - "multiple": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XGROUP": { - "summary": "Create, destroy, and manage consumer groups.", - "complexity": "O(1) for all the subcommands, with the exception of the DESTROY subcommand which takes an additional O(M) time in order to delete the M entries inside the consumer group pending entries list (PEL).", - "arguments": [ - { - "name": "create", - "type": "block", - "block": [ - { - "command": "CREATE", - "name": [ - "key", - "groupname" + [ + "7.0.0", + "Added support for the `<ms>-*` explicit ID form." 
+ ] + ], + "acl_categories": [ + "@write", + "@stream", + "@fast" + ], + "arity": -5, + "key_specs": [ + { + "notes": "UPDATE instead of INSERT because of the optional trimming feature", + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "nomkstream", + "type": "pure-token", + "token": "NOMKSTREAM", + "since": "6.2.0", + "optional": true + }, + { + "name": "trim", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "strategy", + "type": "oneof", + "arguments": [ + { + "name": "maxlen", + "type": "pure-token", + "token": "MAXLEN" + }, + { + "name": "minid", + "type": "pure-token", + "token": "MINID", + "since": "6.2.0" + } + ] + }, + { + "name": "operator", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "equal", + "type": "pure-token", + "token": "=" + }, + { + "name": "approximately", + "type": "pure-token", + "token": "~" + } + ] + }, + { + "name": "threshold", + "type": "string" + }, + { + "name": "count", + "type": "integer", + "token": "LIMIT", + "since": "6.2.0", + "optional": true + } + ] + }, + { + "name": "id_or_auto", + "type": "oneof", + "arguments": [ + { + "name": "auto_id", + "type": "pure-token", + "token": "*" + }, + { + "name": "id", + "type": "string" + } + ] + }, + { + "name": "field_value", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "field", + "type": "string" + }, + { + "name": "value", + "type": "string" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "XAUTOCLAIM": { + "summary": "Changes (or acquires) ownership of messages in a consumer group, as if the messages were delivered to the specified consumer.", + "since": "6.2.0", + "group": 
"stream", + "complexity": "O(1) if COUNT is small.", + "history": [ + [ + "7.0.0", + "Added an element to the reply array, containing deleted entries the command cleared from the PEL" + ] + ], + "acl_categories": [ + "@write", + "@stream", + "@fast" + ], + "arity": -6, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "consumer", + "type": "string" + }, + { + "name": "min-idle-time", + "type": "string" + }, + { + "name": "start", + "type": "string" + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + }, + { + "name": "justid", + "type": "pure-token", + "token": "JUSTID", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "XCLAIM": { + "summary": "Changes (or acquires) ownership of a message in a consumer group, as if the message was delivered to the specified consumer.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(log N) with N being the number of messages in the PEL of the consumer group.", + "acl_categories": [ + "@write", + "@stream", + "@fast" + ], + "arity": -6, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "consumer", + "type": "string" + }, + { + "name": "min-idle-time", + "type": "string" + }, + { + "name": "id", + "type": "string", + "multiple": true + }, + { + "name": "ms", + 
"type": "integer", + "token": "IDLE", + "optional": true + }, + { + "name": "unix-time-milliseconds", + "type": "unix-time", + "token": "TIME", + "optional": true + }, + { + "name": "count", + "type": "integer", + "token": "RETRYCOUNT", + "optional": true + }, + { + "name": "force", + "type": "pure-token", + "token": "FORCE", + "optional": true + }, + { + "name": "justid", + "type": "pure-token", + "token": "JUSTID", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "XDEL": { + "summary": "Removes the specified entries from the stream. Returns the number of items actually deleted, that may be different from the number of IDs passed in case certain IDs do not exist.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1) for each single item to delete in the stream, regardless of the stream size.", + "acl_categories": [ + "@write", + "@stream", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "id", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "XGROUP": { + "summary": "A container for consumer groups commands", + "since": "5.0.0", + "group": "stream", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "XGROUP CREATE": { + "summary": "Create a consumer group.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added the `entries_read` named argument." 
+ ] + ], + "acl_categories": [ + "@write", + "@stream", + "@slow" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "id", + "type": "oneof", + "arguments": [ + { + "name": "id", + "type": "string" + }, + { + "name": "new_id", + "type": "pure-token", + "token": "$" + } + ] + }, + { + "name": "mkstream", + "type": "pure-token", + "token": "MKSTREAM", + "optional": true + }, + { + "name": "entries_read", + "type": "integer", + "token": "ENTRIESREAD", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "XGROUP CREATECONSUMER": { + "summary": "Create a consumer in a consumer group.", + "since": "6.2.0", + "group": "stream", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@stream", + "@slow" + ], + "arity": 5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "insert": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "consumername", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "XGROUP DELCONSUMER": { + "summary": "Delete a consumer from a consumer group.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "acl_categories": [ + "@write", + "@stream", + "@slow" + ], + "arity": 5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 
0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "consumername", + "type": "string" + } + ], + "command_flags": [ + "write" + ] + }, + "XGROUP DESTROY": { + "summary": "Destroy a consumer group.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(N) where N is the number of entries in the group's pending entries list (PEL).", + "acl_categories": [ + "@write", + "@stream", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + } + ], + "command_flags": [ + "write" + ] + }, + "XGROUP HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "acl_categories": [ + "@stream", + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "XGROUP SETID": { + "summary": "Set a consumer group to an arbitrary last delivered ID value.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added the optional `entries_read` argument." 
+ ] + ], + "acl_categories": [ + "@write", + "@stream", + "@slow" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + }, + { + "name": "id", + "type": "oneof", + "arguments": [ + { + "name": "id", + "type": "string" + }, + { + "name": "new_id", + "type": "pure-token", + "token": "$" + } + ] + }, + { + "name": "entries_read", + "type": "integer", + "token": "ENTRIESREAD", + "optional": true + } + ], + "command_flags": [ + "write" + ] + }, + "XINFO": { + "summary": "A container for stream introspection commands", + "since": "5.0.0", + "group": "stream", + "complexity": "Depends on subcommand.", + "acl_categories": [ + "@slow" + ], + "arity": -2 + }, + "XINFO CONSUMERS": { + "summary": "List the consumers in a consumer group", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@stream", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "groupname", + "type": "string" + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "XINFO GROUPS": { + "summary": "List the consumer groups of a stream", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added the `entries-read` and `lag` fields" + ] + ], + "acl_categories": [ + "@read", + "@stream", + "@slow" + ], + "arity": 3, + "key_specs": [ + { + 
"begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly" + ] + }, + "XINFO HELP": { + "summary": "Show helpful text about the different subcommands", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "acl_categories": [ + "@stream", + "@slow" + ], + "arity": 2, + "command_flags": [ + "loading", + "stale" + ] + }, + "XINFO STREAM": { + "summary": "Get information about a stream", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "history": [ + [ + "6.0.0", + "Added the `FULL` modifier." ], - "type": [ - "key", - "string" + [ + "7.0.0", + "Added the `max-deleted-entry-id`, `entries-added`, `recorded-first-entry-id`, `entries-read` and `lag` fields" ] - }, - { - "name": "id", - "type": "enum", - "enum": [ - "ID", - "$" + ], + "acl_categories": [ + "@read", + "@stream", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "full", + "type": "block", + "token": "FULL", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ] + } + ], + "command_flags": [ + "readonly" + ] + }, + "XLEN": { + "summary": "Return the number of entries in a stream", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@stream", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + 
"type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "XPENDING": { + "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", + "since": "5.0.0", + "group": "stream", + "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer.", + "history": [ + [ + "6.2.0", + "Added the `IDLE` option and exclusive range intervals." ] - }, - { - "command": "MKSTREAM", - "optional": true - } - ], - "optional": true - }, - { - "name": "setid", - "type": "block", - "block": [ - { - "command": "SETID", - "name": [ - "key", - "groupname" + ], + "acl_categories": [ + "@read", + "@stream", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "group", + "type": "string" + }, + { + "name": "filters", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "min-idle-time", + "type": "integer", + "token": "IDLE", + "since": "6.2.0", + "optional": true + }, + { + "name": "start", + "type": "string" + }, + { + "name": "end", + "type": "string" + }, + { + "name": "count", + "type": "integer" + }, + { + "name": "consumer", + "type": "string", + "optional": true + } + ] + } + ], + 
"command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "XRANGE": { + "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval", + "since": "5.0.0", + "group": "stream", + "complexity": "O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).", + "history": [ + [ + "6.2.0", + "Added exclusive ranges." + ] + ], + "acl_categories": [ + "@read", + "@stream", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "string" + }, + { + "name": "end", + "type": "string" + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "XREAD": { + "summary": "Return never seen elements in multiple streams, with IDs greater than the ones reported by the caller for each stream. Can block.", + "since": "5.0.0", + "group": "stream", + "complexity": "For each stream mentioned: O(N) with N being the number of elements being returned, it means that XREAD-ing with a fixed COUNT is O(1). 
Note that when the BLOCK option is used, XADD will pay O(M) time in order to serve the M clients blocked on the stream getting new data.", + "acl_categories": [ + "@read", + "@stream", + "@slow", + "@blocking" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "keyword", + "spec": { + "keyword": "STREAMS", + "startfrom": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 2 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + }, + { + "name": "milliseconds", + "type": "integer", + "token": "BLOCK", + "optional": true + }, + { + "name": "streams", + "type": "block", + "token": "STREAMS", + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "id", + "type": "string", + "multiple": true + } + ] + } + ], + "command_flags": [ + "readonly", + "blocking", + "movablekeys" + ] + }, + "XREADGROUP": { + "summary": "Return new entries from a stream using a consumer group, or access the history of the pending entries for a given consumer. Can block.", + "since": "5.0.0", + "group": "stream", + "complexity": "For each stream mentioned: O(M) with M being the number of elements returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1). 
On the other side when XREADGROUP blocks, XADD will pay the O(N) time in order to serve the N clients blocked on the stream getting new data.", + "acl_categories": [ + "@write", + "@stream", + "@slow", + "@blocking" + ], + "arity": -7, + "key_specs": [ + { + "begin_search": { + "type": "keyword", + "spec": { + "keyword": "STREAMS", + "startfrom": 4 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": -1, + "keystep": 1, + "limit": 2 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "group_consumer", + "type": "block", + "token": "GROUP", + "arguments": [ + { + "name": "group", + "type": "string" + }, + { + "name": "consumer", + "type": "string" + } + ] + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + }, + { + "name": "milliseconds", + "type": "integer", + "token": "BLOCK", + "optional": true + }, + { + "name": "noack", + "type": "pure-token", + "token": "NOACK", + "optional": true + }, + { + "name": "streams", + "type": "block", + "token": "STREAMS", + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "id", + "type": "string", + "multiple": true + } + ] + } + ], + "command_flags": [ + "write", + "blocking", + "movablekeys" + ] + }, + "XREVRANGE": { + "summary": "Return a range of elements in a stream, with IDs matching the specified IDs interval, in reverse order (from greater to smaller IDs) compared to XRANGE", + "since": "5.0.0", + "group": "stream", + "complexity": "O(N) with N being the number of elements returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1).", + "history": [ + [ + "6.2.0", + "Added exclusive ranges." 
+ ] + ], + "acl_categories": [ + "@read", + "@stream", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "end", + "type": "string" + }, + { + "name": "start", + "type": "string" + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "XSETID": { + "summary": "An internal command for replicating stream values", + "since": "5.0.0", + "group": "stream", + "complexity": "O(1)", + "history": [ + [ + "7.0.0", + "Added the `entries_added` and `max_deleted_entry_id` arguments." + ] + ], + "acl_categories": [ + "@write", + "@stream", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "last-id", + "type": "string" + }, + { + "name": "entries_added", + "type": "integer", + "token": "ENTRIESADDED", + "optional": true + }, + { + "name": "max_deleted_entry_id", + "type": "string", + "token": "MAXDELETEDID", + "optional": true + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "XTRIM": { + "summary": "Trims the stream to (approximately if '~' is passed) a certain size", + "since": "5.0.0", + "group": "stream", + "complexity": "O(N), with N being the number of evicted entries. 
Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation.", + "history": [ + [ + "6.2.0", + "Added the `MINID` trimming strategy and the `LIMIT` option." + ] + ], + "acl_categories": [ + "@write", + "@stream", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "trim", + "type": "block", + "arguments": [ + { + "name": "strategy", + "type": "oneof", + "arguments": [ + { + "name": "maxlen", + "type": "pure-token", + "token": "MAXLEN" + }, + { + "name": "minid", + "type": "pure-token", + "token": "MINID", + "since": "6.2.0" + } + ] + }, + { + "name": "operator", + "type": "oneof", + "optional": true, + "arguments": [ + { + "name": "equal", + "type": "pure-token", + "token": "=" + }, + { + "name": "approximately", + "type": "pure-token", + "token": "~" + } + ] + }, + { + "name": "threshold", + "type": "string" + }, + { + "name": "count", + "type": "integer", + "token": "LIMIT", + "since": "6.2.0", + "optional": true + } + ] + } + ], + "command_flags": [ + "write" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "ZADD": { + "summary": "Add one or more members to a sorted set, or update its score if it already exists", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(log(N)) for each item added, where N is the number of elements in the sorted set.", + "history": [ + [ + "2.4.0", + "Accepts multiple elements." + ], + [ + "3.0.2", + "Added the `XX`, `NX`, `CH` and `INCR` options." ], - "type": [ - "key", - "string" + [ + "6.2.0", + "Added the `GT` and `LT` options." 
] - }, - { - "name": "id", - "type": "enum", - "enum": [ - "ID", - "$" + ], + "acl_categories": [ + "@write", + "@sortedset", + "@fast" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "condition", + "type": "oneof", + "since": "3.0.2", + "optional": true, + "arguments": [ + { + "name": "nx", + "type": "pure-token", + "token": "NX" + }, + { + "name": "xx", + "type": "pure-token", + "token": "XX" + } + ] + }, + { + "name": "comparison", + "type": "oneof", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "gt", + "type": "pure-token", + "token": "GT" + }, + { + "name": "lt", + "type": "pure-token", + "token": "LT" + } + ] + }, + { + "name": "change", + "type": "pure-token", + "token": "CH", + "since": "3.0.2", + "optional": true + }, + { + "name": "increment", + "type": "pure-token", + "token": "INCR", + "since": "3.0.2", + "optional": true + }, + { + "name": "score_member", + "type": "block", + "multiple": true, + "arguments": [ + { + "name": "score", + "type": "double" + }, + { + "name": "member", + "type": "string" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "ZCARD": { + "summary": "Get the number of members in a sorted set", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": 2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + } + ], + "command_flags": [ + "readonly", + 
"fast" + ] + }, + "ZCOUNT": { + "summary": "Count the members in a sorted set with scores within the given values", + "since": "2.0.0", + "group": "sorted-set", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "double" + }, + { + "name": "max", + "type": "double" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "ZDIFF": { + "summary": "Subtract multiple sorted sets", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + } + ], + "command_flags": [ + "readonly", + "movablekeys" + ] + }, + "ZDIFFSTORE": { + "summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, 
and K is the size of the result set.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + } + ], + "command_flags": [ + "write", + "denyoom", + "movablekeys" + ] + }, + "ZINCRBY": { + "summary": "Increment the score of a member in a sorted set", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(log(N)) where N is the number of elements in the sorted set.", + "acl_categories": [ + "@write", + "@sortedset", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "update": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "increment", + "type": "integer" + }, + { + "name": "member", + "type": "string" + } + ], + "command_flags": [ + "write", + "denyoom", + "fast" + ] + }, + "ZINTER": { + "summary": "Intersect multiple sorted sets", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", + "acl_categories": [ + "@read", 
+ "@sortedset", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "weight", + "type": "integer", + "token": "WEIGHTS", + "optional": true, + "multiple": true + }, + { + "name": "aggregate", + "type": "oneof", + "token": "AGGREGATE", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + } + ], + "command_flags": [ + "readonly", + "movablekeys" + ] + }, + "ZINTERCARD": { + "summary": "Intersect multiple sorted sets and return the cardinality of the result", + "since": "7.0.0", + "group": "sorted-set", + "complexity": "O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets.", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "limit", + "type": "integer", + "token": "LIMIT", + "optional": true + } + ], + "command_flags": [ + "readonly", + "movablekeys" + ] + }, + "ZINTERSTORE": { + "summary": "Intersect multiple sorted sets and 
store the resulting sorted set in a new key", + "since": "2.0.0", + "group": "sorted-set", + "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + }, + { + "name": "weight", + "type": "integer", + "token": "WEIGHTS", + "optional": true, + "multiple": true + }, + { + "name": "aggregate", + "type": "oneof", + "token": "AGGREGATE", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "movablekeys" + ] + }, + "ZLEXCOUNT": { + "summary": "Count the number of members in a sorted set between a given lexicographical range", + "since": "2.8.9", + "group": "sorted-set", + "complexity": "O(log(N)) with N being the number of elements in the sorted set.", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": 
"range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "ZMPOP": { + "summary": "Remove and return members with scores in a sorted set", + "since": "7.0.0", + "group": "sorted-set", + "complexity": "O(K) + O(N*log(M)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "where", + "type": "oneof", + "arguments": [ + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "write", + "movablekeys" + ] + }, + "ZMSCORE": { + "summary": "Get the score associated with the given members in a sorted set", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(N) where N is the number of members being requested.", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": 
true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "ZPOPMAX": { + "summary": "Remove and return members with the highest scores in a sorted set", + "since": "5.0.0", + "group": "sorted-set", + "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", + "acl_categories": [ + "@write", + "@sortedset", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "ZPOPMIN": { + "summary": "Remove and return members with the lowest scores in a sorted set", + "since": "5.0.0", + "group": "sorted-set", + "complexity": "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped.", + "acl_categories": [ + "@write", + "@sortedset", + "@fast" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "access": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "count", + "type": "integer", + "optional": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "ZRANDMEMBER": { + "summary": "Get one or multiple random elements from a sorted set", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(N) where 
N is the number of elements returned", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -2, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "options", + "type": "block", + "optional": true, + "arguments": [ + { + "name": "count", + "type": "integer" + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "ZRANGE": { + "summary": "Return a range of members in a sorted set", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", + "history": [ + [ + "6.2.0", + "Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options." 
+ ] + ], + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "string" + }, + { + "name": "stop", + "type": "string" + }, + { + "name": "sortby", + "type": "oneof", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "byscore", + "type": "pure-token", + "token": "BYSCORE" + }, + { + "name": "bylex", + "type": "pure-token", + "token": "BYLEX" + } + ] + }, + { + "name": "rev", + "type": "pure-token", + "token": "REV", + "since": "6.2.0", + "optional": true + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "since": "6.2.0", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + } + ], + "command_flags": [ + "readonly" + ] + }, + "ZRANGEBYLEX": { + "summary": "Return a range of members in a sorted set, by lexicographical range", + "since": "2.8.9", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `BYLEX` argument", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "ZRANGEBYSCORE": { + "summary": "Return a range of members in a sorted set, by score", + "since": "1.0.5", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `BYSCORE` argument", + "history": [ + [ + "2.0.0", + "Added the `WITHSCORES` modifier." 
+ ] + ], + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "double" + }, + { + "name": "max", + "type": "double" + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "since": "2.0.0", + "optional": true + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "ZRANGESTORE": { + "summary": "Store a range of members from sorted set into another key", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": -5, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "dst", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "src", + "type": "key", + "key_spec_index": 1 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + }, + { + "name": "sortby", + "type": 
"oneof", + "optional": true, + "arguments": [ + { + "name": "byscore", + "type": "pure-token", + "token": "BYSCORE" + }, + { + "name": "bylex", + "type": "pure-token", + "token": "BYLEX" + } + ] + }, + { + "name": "rev", + "type": "pure-token", + "token": "REV", + "optional": true + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom" + ] + }, + "ZRANK": { + "summary": "Determine the index of a member in a sorted set", + "since": "2.0.0", + "group": "sorted-set", + "complexity": "O(log(N))", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "ZREM": { + "summary": "Remove one or more members from a sorted set", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(M*log(N)) with N being the number of elements in the sorted set and M the number of elements to be removed.", + "history": [ + [ + "2.4.0", + "Accepts multiple elements." 
+ ] + ], + "acl_categories": [ + "@write", + "@sortedset", + "@fast" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string", + "multiple": true + } + ], + "command_flags": [ + "write", + "fast" + ] + }, + "ZREMRANGEBYLEX": { + "summary": "Remove all members in a sorted set between the given lexicographical range", + "since": "2.8.9", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "string" + }, + { + "name": "max", + "type": "string" + } + ], + "command_flags": [ + "write" + ] + }, + "ZREMRANGEBYRANK": { + "summary": "Remove all members in a sorted set within the given indexes", + "since": "2.0.0", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + 
"type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + } + ], + "command_flags": [ + "write" + ] + }, + "ZREMRANGEBYSCORE": { + "summary": "Remove all members in a sorted set within the given scores", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": 4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RW": true, + "delete": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "min", + "type": "double" + }, + { + "name": "max", + "type": "double" + } + ], + "command_flags": [ + "write" + ] + }, + "ZREVRANGE": { + "summary": "Return a range of members in a sorted set, by index, with scores ordered from high to low", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `REV` argument", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "start", + "type": "integer" + }, + { + "name": "stop", + "type": "integer" + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + } + ], + "command_flags": [ + 
"readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "ZREVRANGEBYLEX": { + "summary": "Return a range of members in a sorted set, by lexicographical range, ordered from higher to lower strings.", + "since": "2.8.9", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `REV` and `BYLEX` arguments", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "max", + "type": "string" + }, + { + "name": "min", + "type": "string" + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "ZREVRANGEBYSCORE": { + "summary": "Return a range of members in a sorted set, by score, with scores ordered from high to low", + "since": "2.2.0", + "group": "sorted-set", + "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", + "deprecated_since": "6.2.0", + "replaced_by": "`ZRANGE` with the `REV` and `BYSCORE` arguments", + "history": [ + [ + "2.1.6", + "`min` and `max` can be exclusive." 
] - } - ], - "optional": true - }, - { - "command": "DESTROY", - "name": [ - "key", - "groupname" - ], - "type": [ - "key", - "string" - ], - "optional": true - }, - { - "command": "CREATECONSUMER", - "name": [ - "key", - "groupname", - "consumername" - ], - "type": [ - "key", - "string", - "string" - ], - "optional": true - }, - { - "command": "DELCONSUMER", - "name": [ - "key", - "groupname", - "consumername" - ], - "type": [ - "key", - "string", - "string" - ], - "optional": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XREADGROUP": { - "summary": "Return new entries from a stream using a consumer group, or access the history of the pending entries for a given consumer. Can block.", - "complexity": "For each stream mentioned: O(M) with M being the number of elements returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1). On the other side when XREADGROUP blocks, XADD will pay the O(N) time in order to serve the N clients blocked on the stream getting new data.", - "arguments": [ - { - "command": "GROUP", - "name": [ - "group", - "consumer" - ], - "type": [ - "string", - "string" - ] - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - }, - { - "command": "BLOCK", - "name": "milliseconds", - "type": "integer", - "optional": true - }, - { - "name": "noack", - "type": "enum", - "enum": [ - "NOACK" - ], - "optional": true - }, - { - "name": "streams", - "type": "enum", - "enum": [ - "STREAMS" - ] - }, - { - "name": "key", - "type": "key", - "multiple": true - }, - { - "name": "ID", - "type": "string", - "multiple": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XACK": { - "summary": "Marks a pending message as correctly processed, effectively removing it from the pending entries list of the consumer group. 
Return value of the command is the number of messages successfully acknowledged, that is, the IDs we were actually able to resolve in the PEL.", - "complexity": "O(1) for each message ID processed.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "group", - "type": "string" - }, - { - "name": "ID", - "type": "string", - "multiple": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XCLAIM": { - "summary": "Changes (or acquires) ownership of a message in a consumer group, as if the message was delivered to the specified consumer.", - "complexity": "O(log N) with N being the number of messages in the PEL of the consumer group.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "group", - "type": "string" - }, - { - "name": "consumer", - "type": "string" - }, - { - "name": "min-idle-time", - "type": "string" - }, - { - "name": "ID", - "type": "string", - "multiple": true - }, - { - "command": "IDLE", - "name": "ms", - "type": "integer", - "optional": true - }, - { - "command": "TIME", - "name": "ms-unix-time", - "type": "integer", - "optional": true - }, - { - "command": "RETRYCOUNT", - "name": "count", - "type": "integer", - "optional": true - }, - { - "name": "force", - "enum": [ - "FORCE" - ], - "optional": true - }, - { - "name": "justid", - "enum": [ - "JUSTID" - ], - "optional": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "XAUTOCLAIM": { - "summary": "Changes (or acquires) ownership of messages in a consumer group, as if the messages were delivered to the specified consumer.", - "complexity": "O(1) if COUNT is small.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "group", - "type": "string" - }, - { - "name": "consumer", - "type": "string" - }, - { - "name": "min-idle-time", - "type": "string" - }, - { - "name": "start", - "type": "string" - }, - { - "command": "COUNT", - "name": "count", - "type": "integer", - "optional": true - }, - { - "name": "justid", - 
"enum": [ - "JUSTID" - ], - "optional": true - } - ], - "since": "6.2.0", - "group": "stream" - }, - "XPENDING": { - "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", - "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer.", - "arguments": [ - { - "name": "key", - "type": "key" - }, - { - "name": "group", - "type": "string" - }, - { - "type": "block", - "name": "filters", - "block": [ - { - "command": "IDLE", - "name": "min-idle-time", - "type": "integer", - "optional": true - }, - { - "name": "start", - "type": "string" - }, - { - "name": "end", - "type": "string" - }, - { - "name": "count", - "type": "integer" - }, - { - "name": "consumer", - "type": "string", - "optional": true - } - ], - "optional": true - } - ], - "since": "5.0.0", - "group": "stream" - }, - "LATENCY DOCTOR": { - "summary": "Return a human readable latency analysis report.", - "since": "2.8.13", - "group": "server" - }, - "LATENCY GRAPH": { - "summary": "Return a latency graph for the event.", - "arguments": [ - { - "name": "event", - "type": "string" - } - ], - "since": "2.8.13", - "group": "server" - }, - "LATENCY HISTORY": { - "summary": "Return timestamp-latency samples for the event.", - "arguments": [ - { - "name": "event", - "type": "string" - } - ], - "since": "2.8.13", - "group": "server" - }, - "LATENCY LATEST": { - "summary": "Return the latest latency samples for all events.", - "since": "2.8.13", - "group": "server" - }, - "LATENCY RESET": { - "summary": "Reset latency data for one or more events.", - "arguments": [ - { - "name": "event", - "type": "string", - 
"optional": true, - "multiple": true - } - ], - "since": "2.8.13", - "group": "server" - }, - "LATENCY HELP": { - "summary": "Show helpful text about the different subcommands.", - "since": "2.8.13", - "group": "server" - } + ], + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "max", + "type": "double" + }, + { + "name": "min", + "type": "double" + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + }, + { + "name": "offset_count", + "type": "block", + "token": "LIMIT", + "optional": true, + "arguments": [ + { + "name": "offset", + "type": "integer" + }, + { + "name": "count", + "type": "integer" + } + ] + } + ], + "command_flags": [ + "readonly" + ], + "doc_flags": [ + "deprecated" + ] + }, + "ZREVRANK": { + "summary": "Determine the index of a member in a sorted set, with scores ordered from high to low", + "since": "2.0.0", + "group": "sorted-set", + "complexity": "O(log(N))", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "ZSCAN": { + "summary": "Incrementally iterate sorted sets elements and associated scores", + "since": "2.8.0", + "group": "sorted-set", + "complexity": "O(1) for every call. 
O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection..", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "cursor", + "type": "integer" + }, + { + "name": "pattern", + "type": "pattern", + "token": "MATCH", + "optional": true + }, + { + "name": "count", + "type": "integer", + "token": "COUNT", + "optional": true + } + ], + "command_flags": [ + "readonly" + ], + "hints": [ + "nondeterministic_output" + ] + }, + "ZSCORE": { + "summary": "Get the score associated with the given member in a sorted set", + "since": "1.2.0", + "group": "sorted-set", + "complexity": "O(1)", + "acl_categories": [ + "@read", + "@sortedset", + "@fast" + ], + "arity": 3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "key", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "member", + "type": "string" + } + ], + "command_flags": [ + "readonly", + "fast" + ] + }, + "ZUNION": { + "summary": "Add multiple sorted sets", + "since": "6.2.0", + "group": "sorted-set", + "complexity": "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", + "acl_categories": [ + "@read", + "@sortedset", + "@slow" + ], + "arity": -3, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": 
"keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 0, + "multiple": true + }, + { + "name": "weight", + "type": "integer", + "token": "WEIGHTS", + "optional": true, + "multiple": true + }, + { + "name": "aggregate", + "type": "oneof", + "token": "AGGREGATE", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + }, + { + "name": "withscores", + "type": "pure-token", + "token": "WITHSCORES", + "optional": true + } + ], + "command_flags": [ + "readonly", + "movablekeys" + ] + }, + "ZUNIONSTORE": { + "summary": "Add multiple sorted sets and store the resulting sorted set in a new key", + "since": "2.0.0", + "group": "sorted-set", + "complexity": "O(N)+O(M log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.", + "acl_categories": [ + "@write", + "@sortedset", + "@slow" + ], + "arity": -4, + "key_specs": [ + { + "begin_search": { + "type": "index", + "spec": { + "index": 1 + } + }, + "find_keys": { + "type": "range", + "spec": { + "lastkey": 0, + "keystep": 1, + "limit": 0 + } + }, + "OW": true, + "update": true + }, + { + "begin_search": { + "type": "index", + "spec": { + "index": 2 + } + }, + "find_keys": { + "type": "keynum", + "spec": { + "keynumidx": 0, + "firstkey": 1, + "keystep": 1 + } + }, + "RO": true, + "access": true + } + ], + "arguments": [ + { + "name": "destination", + "type": "key", + "key_spec_index": 0 + }, + { + "name": "numkeys", + "type": "integer" + }, + { + "name": "key", + "type": "key", + "key_spec_index": 1, + "multiple": true + }, + { + "name": "weight", + "type": "integer", + "token": 
"WEIGHTS", + "optional": true, + "multiple": true + }, + { + "name": "aggregate", + "type": "oneof", + "token": "AGGREGATE", + "optional": true, + "arguments": [ + { + "name": "sum", + "type": "pure-token", + "token": "SUM" + }, + { + "name": "min", + "type": "pure-token", + "token": "MIN" + }, + { + "name": "max", + "type": "pure-token", + "token": "MAX" + } + ] + } + ], + "command_flags": [ + "write", + "denyoom", + "movablekeys" + ] + } } diff --git a/iredis/data/commands/_index.md b/iredis/data/commands/_index.md new file mode 100644 index 0000000..c963623 --- /dev/null +++ b/iredis/data/commands/_index.md @@ -0,0 +1,4 @@ +--- +title: "Redis Commands" +linkTitle: "Commands" +--- diff --git a/iredis/data/commands/acl-cat.md b/iredis/data/commands/acl-cat.md index eedf692..0eb256f 100644 --- a/iredis/data/commands/acl-cat.md +++ b/iredis/data/commands/acl-cat.md @@ -1,6 +1,6 @@ -The command shows the available ACL categories if called without arguments. If a -category name is given, the command shows all the Redis commands in the -specified category. +The command shows the available ACL categories if called without arguments. +If a category name is given, the command shows all the Redis commands in +the specified category. ACL categories are very useful in order to create ACL rules that include or exclude a large set of commands at once, without specifying every single @@ -10,8 +10,8 @@ stability: ACL SETUSER karin on +@all -@dangerous -We first add all the commands to the set of commands that `karin` is able to -execute, but then we remove all the dangerous commands. +We first add all the commands to the set of commands that `karin` is able +to execute, but then we remove all the dangerous commands. Checking for all the available categories is as simple as: @@ -79,6 +79,4 @@ Then we may want to know what commands are part of a given category: @return -@array-reply: a list of ACL categories or a list of commands inside a given -category. 
The command may return an error if an invalid category name is given -as argument. +@array-reply: a list of ACL categories or a list of commands inside a given category. The command may return an error if an invalid category name is given as argument. diff --git a/iredis/data/commands/acl-deluser.md b/iredis/data/commands/acl-deluser.md index 88359fe..e3f443e 100644 --- a/iredis/data/commands/acl-deluser.md +++ b/iredis/data/commands/acl-deluser.md @@ -1,13 +1,12 @@ Delete all the specified ACL users and terminate all the connections that are authenticated with such users. Note: the special `default` user cannot be -removed from the system, this is the default user that every new connection is -authenticated with. The list of users may include usernames that do not exist, -in such case no operation is performed for the non existing users. +removed from the system, this is the default user that every new connection +is authenticated with. The list of users may include usernames that do not +exist, in such case no operation is performed for the non existing users. @return -@integer-reply: The number of users that were deleted. This number will not -always match the number of arguments since certain users may not exist. +@integer-reply: The number of users that were deleted. This number will not always match the number of arguments since certain users may not exist. @examples diff --git a/iredis/data/commands/acl-dryrun.md b/iredis/data/commands/acl-dryrun.md new file mode 100644 index 0000000..4afb3cd --- /dev/null +++ b/iredis/data/commands/acl-dryrun.md @@ -0,0 +1,18 @@ +Simulate the execution of a given command by a given user. +This command can be used to test the permissions of a given user without having to enable the user or cause the side effects of running the command. + +@return + +@simple-string-reply: `OK` on success. +@bulk-string-reply: An error describing why the user can't execute the command. 
+ +@examples + +``` +> ACL SETUSER VIRGINIA +SET ~* +"OK" +> ACL DRYRUN VIRGINIA SET foo bar +"OK" +> ACL DRYRUN VIRGINIA GET foo bar +"This user has no permissions to run the 'GET' command" +``` diff --git a/iredis/data/commands/acl-genpass.md b/iredis/data/commands/acl-genpass.md index 46043cc..2afbaec 100644 --- a/iredis/data/commands/acl-genpass.md +++ b/iredis/data/commands/acl-genpass.md @@ -1,33 +1,33 @@ ACL users need a solid password in order to authenticate to the server without -security risks. Such password does not need to be remembered by humans, but only -by computers, so it can be very long and strong (unguessable by an external -attacker). The `ACL GENPASS` command generates a password starting from -/dev/urandom if available, otherwise (in systems without /dev/urandom) it uses a -weaker system that is likely still better than picking a weak password by hand. - -By default (if /dev/urandom is available) the password is strong and can be used -for other uses in the context of a Redis application, for instance in order to -create unique session identifiers or other kind of unguessable and not colliding -IDs. The password generation is also very cheap because we don't really ask -/dev/urandom for bits at every execution. At startup Redis creates a seed using -/dev/urandom, then it will use SHA256 in counter mode, with -HMAC-SHA256(seed,counter) as primitive, in order to create more random bytes as -needed. This means that the application developer should be feel free to abuse -`ACL GENPASS` to create as many secure pseudorandom strings as needed. - -The command output is an hexadecimal representation of a binary string. By -default it emits 256 bits (so 64 hex characters). The user can provide an -argument in form of number of bits to emit from 1 to 1024 to change the output -length. Note that the number of bits provided is always rounded to the next -multiple of 4. 
So for instance asking for just 1 bit password will result in 4 -bits to be emitted, in the form of a single hex character. +security risks. Such password does not need to be remembered by humans, but +only by computers, so it can be very long and strong (unguessable by an +external attacker). The `ACL GENPASS` command generates a password starting +from /dev/urandom if available, otherwise (in systems without /dev/urandom) it +uses a weaker system that is likely still better than picking a weak password +by hand. + +By default (if /dev/urandom is available) the password is strong and +can be used for other uses in the context of a Redis application, for +instance in order to create unique session identifiers or other kind of +unguessable and not colliding IDs. The password generation is also very cheap +because we don't really ask /dev/urandom for bits at every execution. At +startup Redis creates a seed using /dev/urandom, then it will use SHA256 +in counter mode, with HMAC-SHA256(seed,counter) as primitive, in order to +create more random bytes as needed. This means that the application developer +should be feel free to abuse `ACL GENPASS` to create as many secure +pseudorandom strings as needed. + +The command output is a hexadecimal representation of a binary string. +By default it emits 256 bits (so 64 hex characters). The user can provide +an argument in form of number of bits to emit from 1 to 1024 to change +the output length. Note that the number of bits provided is always +rounded to the next multiple of 4. So for instance asking for just 1 +bit password will result in 4 bits to be emitted, in the form of a single +hex character. @return -@bulk-string-reply: by default 64 bytes string representing 256 bits of -pseudorandom data. Otherwise if an argument if needed, the output string length -is the number of specified bits (rounded to the next multiple of 4) divided -by 4. 
+@bulk-string-reply: by default 64 bytes string representing 256 bits of pseudorandom data. Otherwise if an argument is needed, the output string length is the number of specified bits (rounded to the next multiple of 4) divided by 4.

@examples

diff --git a/iredis/data/commands/acl-getuser.md b/iredis/data/commands/acl-getuser.md
index 4f6ae57..6c2eeed 100644
--- a/iredis/data/commands/acl-getuser.md
+++ b/iredis/data/commands/acl-getuser.md
@@ -1,34 +1,43 @@
The command returns all the rules defined for an existing ACL user.

-Specifically, it lists the user's ACL flags, password hashes and key name
-patterns. Note that command rules are returned as a string in the same format
-used with the `ACL SETUSER` command. This description of command rules reflects
-the user's effective permissions, so while it may not be identical to the set of
-rules used to configure the user, it is still functionally identical.
+Specifically, it lists the user's ACL flags, password hashes, commands, key patterns, channel patterns (Added in version 6.2) and selectors (Added in version 7.0).
+Additional information may be returned in the future if more metadata is added to the user.

-@array-reply: a list of ACL rule definitions for the user.
+Command rules are always returned in the same format as the one used in the `ACL SETUSER` command.
+Before version 7.0, keys and channels were returned as an array of patterns, however in version 7.0 and later they are now also returned in the same format as the one used in the `ACL SETUSER` command.
+Note: This description of command rules reflects the user's effective permissions, so while it may not be identical to the set of rules used to configure the user, it is still functionally identical.
+
+Selectors are listed in the order they were applied to the user, and include information about commands, key patterns, and channel patterns.

-@history
+@array-reply: a list of ACL rule definitions for the user.

-- `>= 6.2`: Added Pub/Sub channel patterns. 
+If `user` does not exist a @nil-reply is returned. @examples -Here's the default configuration for the default user: +Here's an example configuration for a user ``` -> ACL GETUSER default +> ACL SETUSER sample on nopass +GET allkeys &* (+SET ~key2) +"OK" +> ACL GETUSER sample 1) "flags" 2) 1) "on" 2) "allkeys" - 3) "allcommands" - 4) "nopass" + 3) "nopass" 3) "passwords" 4) (empty array) 5) "commands" 6) "+@all" 7) "keys" -8) 1) "*" +8) "~*" 9) "channels" -10) 1) "*" +10) "&*" +11) "selectors" +12) 1) 1) "commands" + 6) "+SET" + 7) "keys" + 8) "~key2" + 9) "channels" + 10) "&*" ``` diff --git a/iredis/data/commands/acl-help.md b/iredis/data/commands/acl-help.md index 3ec1ffb..ddb9432 100644 --- a/iredis/data/commands/acl-help.md +++ b/iredis/data/commands/acl-help.md @@ -1,5 +1,4 @@ -The `ACL HELP` command returns a helpful text describing the different -subcommands. +The `ACL HELP` command returns a helpful text describing the different subcommands. @return diff --git a/iredis/data/commands/acl-list.md b/iredis/data/commands/acl-list.md index 6d6be4f..e21e710 100644 --- a/iredis/data/commands/acl-list.md +++ b/iredis/data/commands/acl-list.md @@ -1,8 +1,8 @@ -The command shows the currently active ACL rules in the Redis server. Each line -in the returned array defines a different user, and the format is the same used -in the redis.conf file or the external ACL file, so you can cut and paste what -is returned by the ACL LIST command directly inside a configuration file if you -wish (but make sure to check `ACL SAVE`). +The command shows the currently active ACL rules in the Redis server. Each +line in the returned array defines a different user, and the format is the +same used in the redis.conf file or the external ACL file, so you can +cut and paste what is returned by the ACL LIST command directly inside a +configuration file if you wish (but make sure to check `ACL SAVE`). 
@return diff --git a/iredis/data/commands/acl-load.md b/iredis/data/commands/acl-load.md index 3892bb4..521c1a6 100644 --- a/iredis/data/commands/acl-load.md +++ b/iredis/data/commands/acl-load.md @@ -1,20 +1,16 @@ When Redis is configured to use an ACL file (with the `aclfile` configuration -option), this command will reload the ACLs from the file, replacing all the -current ACL rules with the ones defined in the file. The command makes sure to -have an _all or nothing_ behavior, that is: +option), this command will reload the ACLs from the file, replacing all +the current ACL rules with the ones defined in the file. The command makes +sure to have an *all or nothing* behavior, that is: -- If every line in the file is valid, all the ACLs are loaded. -- If one or more line in the file is not valid, nothing is loaded, and the old - ACL rules defined in the server memory continue to be used. +* If every line in the file is valid, all the ACLs are loaded. +* If one or more line in the file is not valid, nothing is loaded, and the old ACL rules defined in the server memory continue to be used. @return @simple-string-reply: `OK` on success. -The command may fail with an error for several reasons: if the file is not -readable, if there is an error inside the file, and in such case the error will -be reported to the user in the error. Finally the command will fail if the -server is not configured to use an external ACL file. +The command may fail with an error for several reasons: if the file is not readable, if there is an error inside the file, and in such case the error will be reported to the user in the error. Finally the command will fail if the server is not configured to use an external ACL file. @examples diff --git a/iredis/data/commands/acl-log.md b/iredis/data/commands/acl-log.md index 6dbdb85..adeaf8d 100644 --- a/iredis/data/commands/acl-log.md +++ b/iredis/data/commands/acl-log.md @@ -4,9 +4,9 @@ The command shows a list of recent ACL security events: 2. 
Commands denied because against the current ACL rules. 3. Commands denied because accessing keys not allowed in the current ACL rules. -The optional argument specifies how many entries to show. By default up to ten -failures are returned. The special `RESET` argument clears the log. Entries are -displayed starting from the most recent. +The optional argument specifies how many entries to show. By default +up to ten failures are returned. The special `RESET` argument clears the log. +Entries are displayed starting from the most recent. @return diff --git a/iredis/data/commands/acl-save.md b/iredis/data/commands/acl-save.md index 17580c6..57badc8 100644 --- a/iredis/data/commands/acl-save.md +++ b/iredis/data/commands/acl-save.md @@ -1,13 +1,11 @@ When Redis is configured to use an ACL file (with the `aclfile` configuration -option), this command will save the currently defined ACLs from the server -memory to the ACL file. +option), this command will save the currently defined ACLs from the server memory to the ACL file. @return @simple-string-reply: `OK` on success. -The command may fail with an error for several reasons: if the file cannot be -written or if the server is not configured to use an external ACL file. +The command may fail with an error for several reasons: if the file cannot be written or if the server is not configured to use an external ACL file. @examples diff --git a/iredis/data/commands/acl-setuser.md b/iredis/data/commands/acl-setuser.md index 4dca6d1..a6ae4a8 100644 --- a/iredis/data/commands/acl-setuser.md +++ b/iredis/data/commands/acl-setuser.md @@ -1,109 +1,91 @@ -Create an ACL user with the specified rules or modify the rules of an existing -user. This is the main interface in order to manipulate Redis ACL users -interactively: if the username does not exist, the command creates the username -without any privilege, then reads from left to right all the rules provided as -successive arguments, setting the user ACL rules as specified. 
+Create an ACL user with the specified rules or modify the rules of an
+existing user. This is the main interface in order to manipulate Redis ACL
+users interactively: if the username does not exist, the command creates
+the username without any privilege, then reads from left to right all the
+rules provided as successive arguments, setting the user ACL rules as specified.

-If the user already exists, the provided ACL rules are simply applied _in
-addition_ to the rules already set. For example:
+If the user already exists, the provided ACL rules are simply applied
+*in addition* to the rules already set. For example:

    ACL SETUSER virginia on allkeys +set

-The above command will create a user called `virginia` that is active (the on
-rule), can access any key (allkeys rule), and can call the set command (+set
-rule). Then another SETUSER call can modify the user rules:
+The above command will create a user called `virginia` that is active
+(the on rule), can access any key (allkeys rule), and can call the
+set command (+set rule). Then another SETUSER call can modify the user rules:

    ACL SETUSER virginia +get

-The above rule will not apply the new rule to the user virginia, so other than
-`SET`, the user virginia will now be able to also use the `GET` command.
+The above rule applies the new rule to the user virginia, so in addition to `SET`, the user virginia will now be able to also use the `GET` command.

-When we want to be sure to define an user from scratch, without caring if it had
-previously defined rules associated, we can use the special rule `reset` as
-first rule, in order to flush all the other existing rules:
+Starting from Redis 7.0, ACL rules can also be grouped into multiple distinct sets of rules, called selectors.
+Selectors are added by wrapping the rules in parentheses and providing them just like any other rule. 
+In order to execute a command, either the root permissions (rules defined outside of parenthesis) or any of the selectors (rules defined inside parenthesis) must match the given command. +For example: + + ACL SETUSER virginia on +GET allkeys (+SET ~app1*) + +This sets a user with two sets of permission, one defined on the user and one defined with a selector. +The root user permissions only allows executing the get command, but can be executed on any keys. +The selector then grants a secondary set of permissions: access to the `SET` command to be executed on any key that starts with "app1". +Using multiple selectors allows you to grant permissions that are different depending on what keys are being accessed. + +When we want to be sure to define a user from scratch, without caring if +it had previously defined rules associated, we can use the special rule +`reset` as first rule, in order to flush all the other existing rules: ACL SETUSER antirez reset [... other rules ...] -After resetting an user, it returns back to the status it has when it was just -created: non active (off rule), can't execute any command, can't access any key: +After resetting a user, it returns back to the status it has when it +was just created: non active (off rule), can't execute any command, can't +access any key: > ACL SETUSER antirez reset +OK > ACL LIST 1) "user antirez off -@all" -ACL rules are either words like "on", "off", "reset", "allkeys", or are special -rules that start with a special character, and are followed by another string -(without any space in between), like "+SET". +ACL rules are either words like "on", "off", "reset", "allkeys", or are +special rules that start with a special character, and are followed by +another string (without any space in between), like "+SET". 
-The following documentation is a reference manual about the capabilities of this -command, however our [ACL tutorial](/topics/acl) may be a more gentle -introduction to how the ACL system works in general. +The following documentation is a reference manual about the capabilities of this command, however our [ACL tutorial](/topics/acl) may be a more gentle introduction to how the ACL system works in general. ## List of rules +Redis ACL rules are split into two categories: rules that define command permissions, "Command rules", and rules that define user state, "User management rules". This is a list of all the supported Redis ACL rules: -- `on`: set the user as active, it will be possible to authenticate as this user - using `AUTH <username> <password>`. -- `off`: set user as not active, it will be impossible to log as this user. - Please note that if a user gets disabled (set to off) after there are - connections already authenticated with such a user, the connections will - continue to work as expected. To also kill the old connections you can use - `CLIENT KILL` with the user option. An alternative is to delete the user with - `ACL DELUSER`, that will result in all the connections authenticated as the - deleted user to be disconnected. -- `~<pattern>`: add the specified key pattern (glob style pattern, like in the - `KEYS` command), to the list of key patterns accessible by the user. You can - add multiple key patterns to the same user. Example: `~objects:*` -- `allkeys`: alias for `~*`, it allows the user to access all the keys. -- `resetkeys`: removes all the key patterns from the list of key patterns the - user can access. -- `&<pattern>`: add the specified glob style pattern to the list of Pub/Sub - channel patterns accessible by the user. You can add multiple channel patterns - to the same user. Example: `&chatroom:*` -- `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub - channels. 
-- `resetchannels`: removes all channel patterns from the list of Pub/Sub channel - patterns the user can access. -- `+<command>`: add this command to the list of the commands the user can call. - Example: `+zadd`. -- `+@<category>`: add all the commands in the specified category to the list of - commands the user is able to execute. Example: `+@string` (adds all the string - commands). For a list of categories check the `ACL CAT` command. -- `+<command>|<subcommand>`: add the specified command to the list of the - commands the user can execute, but only for the specified subcommand. Example: - `+config|get`. Generates an error if the specified command is already allowed - in its full version for the specified user. Note: there is no symmetrical - command to remove subcommands, you need to remove the whole command and re-add - the subcommands you want to allow. This is much safer than removing - subcommands, in the future Redis may add new dangerous subcommands, so - configuring by subtraction is not good. -- `allcommands`: alias of `+@all`. Adds all the commands there are in the - server, including _future commands_ loaded via module, to be executed by this - user. -- `-<command>`. Like `+<command>` but removes the command instead of adding it. -- `-@<category>`: Like `+@<category>` but removes all the commands in the - category instead of adding them. -- `nocommands`: alias for `-@all`. Removes all the commands, the user will no - longer be able to execute anything. -- `nopass`: the user is set as a "no password" user. It means that it will be - possible to authenticate as such user with any password. By default, the - `default` special user is set as "nopass". The `nopass` rule will also reset - all the configured passwords for the user. -- `>password`: Add the specified clear text password as an hashed password in - the list of the users passwords. Every user can have many active passwords, so - that password rotation will be simpler. 
The specified password is not stored - as clear text inside the server. Example: `>mypassword`. -- `#<hashedpassword>`: Add the specified hashed password to the list of user - passwords. A Redis hashed password is hashed with SHA256 and translated into a - hexadecimal string. Example: - `#c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2`. -- `<password`: Like `>password` but removes the password instead of adding it. -- `!<hashedpassword>`: Like `#<hashedpassword>` but removes the password instead - of adding it. -- reset: Remove any capability from the user. It is set to off, without - passwords, unable to execute any command, unable to access any key. +### Command rules + +* `~<pattern>`: add the specified key pattern (glob style pattern, like in the `KEYS` command), to the list of key patterns accessible by the user. This grants both read and write permissions to keys that match the pattern. You can add multiple key patterns to the same user. Example: `~objects:*` +* `%R~<pattern>`: (Available in Redis 7.0 and later) Add the specified read key pattern. This behaves similar to the regular key pattern but only grants permission to read from keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. +* `%W~<pattern>`: (Available in Redis 7.0 and later) Add the specified write key pattern. This behaves similar to the regular key pattern but only grants permission to write to keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. +* `%RW~<pattern>`: (Available in Redis 7.0 and later) Alias for `~<pattern>`. +* `allkeys`: alias for `~*`, it allows the user to access all the keys. +* `resetkeys`: removes all the key patterns from the list of key patterns the user can access. +* `&<pattern>`: (Available in Redis 6.2 and later) add the specified glob style pattern to the list of Pub/Sub channel patterns accessible by the user. 
You can add multiple channel patterns to the same user. Example: `&chatroom:*`
+* `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub channels.
+* `resetchannels`: removes all channel patterns from the list of Pub/Sub channel patterns the user can access.
+* `+<command>`: Add the command to the list of commands the user can call. Can be used with `|` for allowing subcommands (e.g "+config|get").
+* `+@<category>`: add all the commands in the specified category to the list of commands the user is able to execute. Example: `+@string` (adds all the string commands). For a list of categories check the `ACL CAT` command.
+* `+<command>|first-arg`: Allow a specific first argument of an otherwise disabled command. It is only supported on commands with no sub-commands, and is not allowed as negative form like -SELECT|1, only additive starting with "+". This feature is deprecated and may be removed in the future.
+* `allcommands`: alias of `+@all`. Adds all the commands there are in the server, including *future commands* loaded via module, to be executed by this user.
+* `-<command>`: Remove the command from the list of commands the user can call. Starting with Redis 7.0, it can be used with `|` for blocking subcommands (e.g "-config|set").
+* `-@<category>`: Like `+@<category>` but removes all the commands in the category instead of adding them.
+* `nocommands`: alias for `-@all`. Removes all the commands, the user will no longer be able to execute anything.
+
+### User management rules
+
+* `on`: set the user as active, it will be possible to authenticate as this user using `AUTH <username> <password>`.
+* `off`: set user as not active, it will be impossible to log as this user. Please note that if a user gets disabled (set to off) after there are connections already authenticated with such a user, the connections will continue to work as expected. To also kill the old connections you can use `CLIENT KILL` with the user option. 
An alternative is to delete the user with `ACL DELUSER`, that will result in all the connections authenticated as the deleted user to be disconnected. +* `nopass`: the user is set as a "no password" user. It means that it will be possible to authenticate as such user with any password. By default, the `default` special user is set as "nopass". The `nopass` rule will also reset all the configured passwords for the user. +* `>password`: Add the specified clear text password as a hashed password in the list of the users passwords. Every user can have many active passwords, so that password rotation will be simpler. The specified password is not stored as clear text inside the server. Example: `>mypassword`. +* `#<hashedpassword>`: Add the specified hashed password to the list of user passwords. A Redis hashed password is hashed with SHA256 and translated into a hexadecimal string. Example: `#c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2`. +* `<password`: Like `>password` but removes the password instead of adding it. +* `!<hashedpassword>`: Like `#<hashedpassword>` but removes the password instead of adding it. +* `(<rule list>)`: (Available in Redis 7.0 and later) Create a new selector to match rules against. Selectors are evaluated after the user permissions, and are evaluated according to the order they are defined. If a command matches either the user permissions or any selector, it is allowed. See [selectors](/topics/acl#selectors) for more information. +* `clearselectors`: (Available in Redis 7.0 and later) Delete all of the selectors attached to the user. +* `reset`: Remove any capability from the user. It is set to off, without passwords, unable to execute any command, unable to access any key. @return @@ -111,10 +93,6 @@ This is a list of all the supported Redis ACL rules: If the rules contain errors, the error is returned. -@history - -- `>= 6.2`: Added Pub/Sub channel patterns. 
- @examples ``` diff --git a/iredis/data/commands/acl-users.md b/iredis/data/commands/acl-users.md index c4d4d8c..9b0fe1b 100644 --- a/iredis/data/commands/acl-users.md +++ b/iredis/data/commands/acl-users.md @@ -1,5 +1,5 @@ -The command shows a list of all the usernames of the currently configured users -in the Redis ACL system. +The command shows a list of all the usernames of the currently configured +users in the Redis ACL system. @return diff --git a/iredis/data/commands/acl-whoami.md b/iredis/data/commands/acl-whoami.md index 3007760..5ec7b84 100644 --- a/iredis/data/commands/acl-whoami.md +++ b/iredis/data/commands/acl-whoami.md @@ -1,6 +1,6 @@ -Return the username the current connection is authenticated with. New -connections are authenticated with the "default" user. They can change user -using `AUTH`. +Return the username the current connection is authenticated with. +New connections are authenticated with the "default" user. They +can change user using `AUTH`. @return diff --git a/iredis/data/commands/acl.md b/iredis/data/commands/acl.md new file mode 100644 index 0000000..eb9277c --- /dev/null +++ b/iredis/data/commands/acl.md @@ -0,0 +1,3 @@ +This is a container command for [Access Control List](/docs/manual/security/acl/) commands. + +To see the list of available commands you can call `ACL HELP`. diff --git a/iredis/data/commands/append.md b/iredis/data/commands/append.md index c354122..2c8bd74 100644 --- a/iredis/data/commands/append.md +++ b/iredis/data/commands/append.md @@ -1,6 +1,7 @@ If `key` already exists and is a string, this command appends the `value` at the -end of the string. If `key` does not exist it is created and set as an empty -string, so `APPEND` will be similar to `SET` in this special case. +end of the string. +If `key` does not exist it is created and set as an empty string, so `APPEND` +will be similar to `SET` in this special case. 
@return @@ -18,8 +19,8 @@ GET mykey ## Pattern: Time series The `APPEND` command can be used to create a very compact representation of a -list of fixed-size samples, usually referred as _time series_. Every time a new -sample arrives we can store it using the command +list of fixed-size samples, usually referred as _time series_. +Every time a new sample arrives we can store it using the command ``` APPEND timeseries "fixed-size sample" @@ -27,17 +28,17 @@ APPEND timeseries "fixed-size sample" Accessing individual elements in the time series is not hard: -- `STRLEN` can be used in order to obtain the number of samples. -- `GETRANGE` allows for random access of elements. If our time series have - associated time information we can easily implement a binary search to get - range combining `GETRANGE` with the Lua scripting engine available in Redis - 2.6. -- `SETRANGE` can be used to overwrite an existing time series. - -The limitation of this pattern is that we are forced into an append-only mode of -operation, there is no way to cut the time series to a given size easily because -Redis currently lacks a command able to trim string objects. However the space -efficiency of time series stored in this way is remarkable. +* `STRLEN` can be used in order to obtain the number of samples. +* `GETRANGE` allows for random access of elements. + If our time series have associated time information we can easily implement + a binary search to get range combining `GETRANGE` with the Lua scripting + engine available in Redis 2.6. +* `SETRANGE` can be used to overwrite an existing time series. + +The limitation of this pattern is that we are forced into an append-only mode +of operation, there is no way to cut the time series to a given size easily +because Redis currently lacks a command able to trim string objects. +However the space efficiency of time series stored in this way is remarkable. 
Hint: it is possible to switch to a different key based on the current Unix time, in this way it is possible to have just a relatively small amount of diff --git a/iredis/data/commands/asking.md b/iredis/data/commands/asking.md new file mode 100644 index 0000000..d98643c --- /dev/null +++ b/iredis/data/commands/asking.md @@ -0,0 +1,10 @@ +When a cluster client receives an `-ASK` redirect, the `ASKING` command is sent to the target node followed by the command which was redirected. +This is normally done automatically by cluster clients. + +If an `-ASK` redirect is received during a transaction, only one ASKING command needs to be sent to the target node before sending the complete transaction to the target node. + +See [ASK redirection in the Redis Cluster Specification](/topics/cluster-spec#ask-redirection) for details. + +@return + +@simple-string-reply: `OK`. diff --git a/iredis/data/commands/auth.md b/iredis/data/commands/auth.md index 2e93f70..7c1e02a 100644 --- a/iredis/data/commands/auth.md +++ b/iredis/data/commands/auth.md @@ -1,46 +1,36 @@ The AUTH command authenticates the current connection in two cases: 1. If the Redis server is password protected via the `requirepass` option. -2. If a Redis 6.0 instance, or greater, is using the - [Redis ACL system](/topics/acl). +2. If a Redis 6.0 instance, or greater, is using the [Redis ACL system](/topics/acl). Redis versions prior of Redis 6 were only able to understand the one argument version of the command: AUTH <password> -This form just authenticates against the password set with `requirepass`. In -this configuration Redis will deny any command executed by the just connected -clients, unless the connection gets authenticated via `AUTH`. +This form just authenticates against the password set with `requirepass`. +In this configuration Redis will deny any command executed by the just +connected clients, unless the connection gets authenticated via `AUTH`. 
-If the password provided via AUTH matches the password in the configuration -file, the server replies with the `OK` status code and starts accepting -commands. Otherwise, an error is returned and the clients needs to try a new -password. +If the password provided via AUTH matches the password in the configuration file, the server replies with the `OK` status code and starts accepting commands. +Otherwise, an error is returned and the clients needs to try a new password. When Redis ACLs are used, the command should be given in an extended way: AUTH <username> <password> In order to authenticate the current connection with one of the connections -defined in the ACL list (see `ACL SETUSER`) and the official -[ACL guide](/topics/acl) for more information. +defined in the ACL list (see `ACL SETUSER`) and the official [ACL guide](/topics/acl) for more information. -When ACLs are used, the single argument form of the command, where only the -password is specified, assumes that the implicit username is "default". - -@history - -- `>= 6.0.0`: Added ACL style (username and password). +When ACLs are used, the single argument form of the command, where only the password is specified, assumes that the implicit username is "default". ## Security notice -Because of the high performance nature of Redis, it is possible to try a lot of -passwords in parallel in very short time, so make sure to generate a strong and -very long password so that this attack is infeasible. A good way to generate -strong passwords is via the `ACL GENPASS` command. +Because of the high performance nature of Redis, it is possible to try +a lot of passwords in parallel in very short time, so make sure to generate a +strong and very long password so that this attack is infeasible. +A good way to generate strong passwords is via the `ACL GENPASS` command. @return -@simple-string-reply or an error if the password, or username/password pair, is -invalid. 
+@simple-string-reply or an error if the password, or username/password pair, is invalid. diff --git a/iredis/data/commands/bgrewriteaof.md b/iredis/data/commands/bgrewriteaof.md index 2ebaa89..85f5204 100644 --- a/iredis/data/commands/bgrewriteaof.md +++ b/iredis/data/commands/bgrewriteaof.md @@ -1,5 +1,6 @@ -Instruct Redis to start an [Append Only File][tpaof] rewrite process. The -rewrite will create a small optimized version of the current Append Only File. +Instruct Redis to start an [Append Only File][tpaof] rewrite process. +The rewrite will create a small optimized version of the current Append Only +File. [tpaof]: /topics/persistence#append-only-file @@ -10,17 +11,10 @@ process doing persistence. Specifically: -- If a Redis child is creating a snapshot on disk, the AOF rewrite is - _scheduled_ but not started until the saving child producing the RDB file - terminates. In this case the `BGREWRITEAOF` will still return an positive - status reply, but with an appropriate message. You can check if an AOF rewrite - is scheduled looking at the `INFO` command as of Redis 2.6 or successive - versions. -- If an AOF rewrite is already in progress the command returns an error and no +* If a Redis child is creating a snapshot on disk, the AOF rewrite is _scheduled_ but not started until the saving child producing the RDB file terminates. In this case the `BGREWRITEAOF` will still return a positive status reply, but with an appropriate message. You can check if an AOF rewrite is scheduled looking at the `INFO` command as of Redis 2.6 or successive versions. +* If an AOF rewrite is already in progress the command returns an error and no AOF rewrite will be scheduled for a later time. -- If the AOF rewrite could start, but the attempt at starting it fails (for - instance because of an error in creating the child process), an error is - returned to the caller. 
+* If the AOF rewrite could start, but the attempt at starting it fails (for instance because of an error in creating the child process), an error is returned to the caller. Since Redis 2.4 the AOF rewrite is automatically triggered by Redis, however the `BGREWRITEAOF` command can be used to trigger a rewrite at any time. @@ -31,7 +25,6 @@ Please refer to the [persistence documentation][tp] for detailed information. @return -@simple-string-reply: A simple string reply indicating that the rewriting -started or is about to start ASAP, when the call is executed with success. +@simple-string-reply: A simple string reply indicating that the rewriting started or is about to start ASAP, when the call is executed with success. The command may reply with an error in certain cases, as documented above. diff --git a/iredis/data/commands/bgsave.md b/iredis/data/commands/bgsave.md index f04d71b..714d960 100644 --- a/iredis/data/commands/bgsave.md +++ b/iredis/data/commands/bgsave.md @@ -1,7 +1,8 @@ Save the DB in background. -Normally the OK code is immediately returned. Redis forks, the parent continues -to serve the clients, the child saves the DB on disk then exits. +Normally the OK code is immediately returned. +Redis forks, the parent continues to serve the clients, the child saves the DB +on disk then exits. An error is returned if there is already a background save running or if there is another non-background-save process running, specifically an in-progress AOF @@ -20,9 +21,4 @@ Please refer to the [persistence documentation][tp] for detailed information. @return -@simple-string-reply: `Background saving started` if `BGSAVE` started correctly -or `Background saving scheduled` when used with the `SCHEDULE` subcommand. - -@history - -- `>= 3.2.2`: Added the `SCHEDULE` option. +@simple-string-reply: `Background saving started` if `BGSAVE` started correctly or `Background saving scheduled` when used with the `SCHEDULE` subcommand. 
diff --git a/iredis/data/commands/bitcount.md b/iredis/data/commands/bitcount.md index 680aae1..95bd3a3 100644 --- a/iredis/data/commands/bitcount.md +++ b/iredis/data/commands/bitcount.md @@ -1,8 +1,8 @@ Count the number of set bits (population counting) in a string. -By default all the bytes contained in the string are examined. It is possible to -specify the counting operation only in an interval passing the additional -arguments _start_ and _end_. +By default all the bytes contained in the string are examined. +It is possible to specify the counting operation only in an interval passing the +additional arguments _start_ and _end_. Like for the `GETRANGE` command start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last @@ -10,6 +10,11 @@ byte, -2 is the penultimate, and so forth. Non-existent keys are treated as empty strings, so the command will return zero. +By default, the additional arguments _start_ and _end_ specify a byte index. +We can use an additional argument `BIT` to specify a bit index. +So 0 is the first bit, 1 is the second bit, and so forth. +For negative values, -1 is the last bit, -2 is the penultimate, and so forth. + @return @integer-reply @@ -23,44 +28,48 @@ SET mykey "foobar" BITCOUNT mykey BITCOUNT mykey 0 0 BITCOUNT mykey 1 1 +BITCOUNT mykey 1 1 BYTE +BITCOUNT mykey 5 30 BIT ``` ## Pattern: real-time metrics using bitmaps Bitmaps are a very space-efficient representation of certain kinds of -information. One example is a Web application that needs the history of user -visits, so that for instance it is possible to determine what users are good -targets of beta features. +information. +One example is a Web application that needs the history of user visits, so that +for instance it is possible to determine what users are good targets of beta +features. Using the `SETBIT` command this is trivial to accomplish, identifying every day -with a small progressive integer. 
For instance day 0 is the first day the -application was put online, day 1 the next day, and so forth. +with a small progressive integer. +For instance day 0 is the first day the application was put online, day 1 the +next day, and so forth. -Every time a user performs a page view, the application can register that in the -current day the user visited the web site using the `SETBIT` command setting the -bit corresponding to the current day. +Every time a user performs a page view, the application can register that in +the current day the user visited the web site using the `SETBIT` command setting +the bit corresponding to the current day. Later it will be trivial to know the number of single days the user visited the web site simply calling the `BITCOUNT` command against the bitmap. -A similar pattern where user IDs are used instead of days is described in the -article called "[Fast easy realtime metrics using Redis +A similar pattern where user IDs are used instead of days is described +in the article called "[Fast easy realtime metrics using Redis bitmaps][hbgc212fermurb]". -[hbgc212fermurb]: - http://blog.getspool.com/2011/11/29/fast-easy-realtime-metrics-using-redis-bitmaps +[hbgc212fermurb]: http://blog.getspool.com/2011/11/29/fast-easy-realtime-metrics-using-redis-bitmaps ## Performance considerations In the above example of counting days, even after 10 years the application is online we still have just `365*10` bits of data per user, that is just 456 bytes -per user. With this amount of data `BITCOUNT` is still as fast as any other O(1) -Redis command like `GET` or `INCR`. +per user. +With this amount of data `BITCOUNT` is still as fast as any other O(1) Redis +command like `GET` or `INCR`. When the bitmap is big, there are two alternatives: -- Taking a separated key that is incremented every time the bitmap is modified. +* Taking a separated key that is incremented every time the bitmap is modified. 
This can be very efficient and atomic using a small Redis Lua script. -- Running the bitmap incrementally using the `BITCOUNT` _start_ and _end_ +* Running the bitmap incrementally using the `BITCOUNT` _start_ and _end_ optional parameters, accumulating the results client-side, and optionally caching the result into a key. diff --git a/iredis/data/commands/bitfield.md b/iredis/data/commands/bitfield.md index 6d10d93..6609c85 100644 --- a/iredis/data/commands/bitfield.md +++ b/iredis/data/commands/bitfield.md @@ -1,18 +1,8 @@ -The command treats a Redis string as a array of bits, and is capable of -addressing specific integer fields of varying bit widths and arbitrary non -(necessary) aligned offset. In practical terms using this command you can set, -for example, a signed 5 bits integer at bit offset 1234 to a specific value, -retrieve a 31 bit unsigned integer from offset 4567. Similarly the command -handles increments and decrements of the specified integers, providing -guaranteed and well specified overflow and underflow behavior that the user can -configure. - -`BITFIELD` is able to operate with multiple bit fields in the same command call. -It takes a list of operations to perform, and returns an array of replies, where -each array matches the corresponding operation in the list of arguments. - -For example the following command increments an 5 bit signed integer at bit -offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0: +The command treats a Redis string as an array of bits, and is capable of addressing specific integer fields of varying bit widths and arbitrary non (necessary) aligned offset. In practical terms using this command you can set, for example, a signed 5 bits integer at bit offset 1234 to a specific value, retrieve a 31 bit unsigned integer from offset 4567. 
Similarly the command handles increments and decrements of the specified integers, providing guaranteed and well specified overflow and underflow behavior that the user can configure. + +`BITFIELD` is able to operate with multiple bit fields in the same command call. It takes a list of operations to perform, and returns an array of replies, where each array matches the corresponding operation in the list of arguments. + +For example the following command increments a 5 bit signed integer at bit offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0: > BITFIELD mykey INCRBY i5 100 1 GET u4 0 1) (integer) 1 @@ -20,76 +10,54 @@ offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0: Note that: -1. Addressing with `!GET` bits outside the current string length (including the - case the key does not exist at all), results in the operation to be performed - like the missing part all consists of bits set to 0. -2. Addressing with `!SET` or `!INCRBY` bits outside the current string length - will enlarge the string, zero-padding it, as needed, for the minimal length - needed, according to the most far bit touched. +1. Addressing with `!GET` bits outside the current string length (including the case the key does not exist at all), results in the operation to be performed like the missing part all consists of bits set to 0. +2. Addressing with `!SET` or `!INCRBY` bits outside the current string length will enlarge the string, zero-padding it, as needed, for the minimal length needed, according to the most far bit touched. -## Supported subcommands and integer types +## Supported subcommands and integer encoding The following is the list of supported commands. -- **GET** `<type>` `<offset>` -- Returns the specified bit field. -- **SET** `<type>` `<offset>` `<value>` -- Set the specified bit field and - returns its old value. 
-- **INCRBY** `<type>` `<offset>` `<increment>` -- Increments or decrements (if a - negative increment is given) the specified bit field and returns the new - value. +* **GET** `<encoding>` `<offset>` -- Returns the specified bit field. +* **SET** `<encoding>` `<offset>` `<value>` -- Set the specified bit field and returns its old value. +* **INCRBY** `<encoding>` `<offset>` `<increment>` -- Increments or decrements (if a negative increment is given) the specified bit field and returns the new value. There is another subcommand that only changes the behavior of successive `!INCRBY` and `!SET` subcommands calls by setting the overflow behavior: -- **OVERFLOW** `[WRAP|SAT|FAIL]` +* **OVERFLOW** `[WRAP|SAT|FAIL]` -Where an integer type is expected, it can be composed by prefixing with `i` for -signed integers and `u` for unsigned integers with the number of bits of our -integer type. So for example `u8` is an unsigned integer of 8 bits and `i16` is -a signed integer of 16 bits. +Where an integer encoding is expected, it can be composed by prefixing with `i` for signed integers and `u` for unsigned integers with the number of bits of our integer encoding. So for example `u8` is an unsigned integer of 8 bits and `i16` is a +signed integer of 16 bits. -The supported types are up to 64 bits for signed integers, and up to 63 bits for +The supported encodings are up to 64 bits for signed integers, and up to 63 bits for unsigned integers. This limitation with unsigned integers is due to the fact that currently the Redis protocol is unable to return 64 bit unsigned integers as replies. ## Bits and positional offsets -There are two ways in order to specify offsets in the bitfield command. If a -number without any prefix is specified, it is used just as a zero based bit -offset inside the string. +There are two ways in order to specify offsets in the bitfield command. +If a number without any prefix is specified, it is used just as a zero based +bit offset inside the string. 
-However if the offset is prefixed with a `#` character, the specified offset is -multiplied by the integer type width, so for example: +However if the offset is prefixed with a `#` character, the specified offset +is multiplied by the integer encoding's width, so for example: BITFIELD mystring SET i8 #0 100 SET i8 #1 200 -Will set the first i8 integer at offset 0 and the second at offset 8. This way -you don't have to do the math yourself inside your client if what you want is a -plain array of integers of a given size. +Will set the first i8 integer at offset 0 and the second at offset 8. +This way you don't have to do the math yourself inside your client if what +you want is a plain array of integers of a given size. ## Overflow control -Using the `OVERFLOW` command the user is able to fine-tune the behavior of the -increment or decrement overflow (or underflow) by specifying one of the -following behaviors: - -- **WRAP**: wrap around, both with signed and unsigned integers. In the case of - unsigned integers, wrapping is like performing the operation modulo the - maximum value the integer can contain (the C standard behavior). With signed - integers instead wrapping means that overflows restart towards the most - negative value and underflows towards the most positive ones, so for example - if an `i8` integer is set to the value 127, incrementing it by 1 will yield - `-128`. -- **SAT**: uses saturation arithmetic, that is, on underflows the value is set - to the minimum integer value, and on overflows to the maximum integer value. - For example incrementing an `i8` integer starting from value 120 with an - increment of 10, will result into the value 127, and further increments will - always keep the value at 127. The same happens on underflows, but towards the - value is blocked at the most negative value. -- **FAIL**: in this mode no operation is performed on overflows or underflows - detected. 
The corresponding return value is set to NULL to signal the - condition to the caller. +Using the `OVERFLOW` command the user is able to fine-tune the behavior of +the increment or decrement overflow (or underflow) by specifying one of +the following behaviors: + +* **WRAP**: wrap around, both with signed and unsigned integers. In the case of unsigned integers, wrapping is like performing the operation modulo the maximum value the integer can contain (the C standard behavior). With signed integers instead wrapping means that overflows restart towards the most negative value and underflows towards the most positive ones, so for example if an `i8` integer is set to the value 127, incrementing it by 1 will yield `-128`. +* **SAT**: uses saturation arithmetic, that is, on underflows the value is set to the minimum integer value, and on overflows to the maximum integer value. For example incrementing an `i8` integer starting from value 120 with an increment of 10, will result into the value 127, and further increments will always keep the value at 127. The same happens on underflows, but towards the value is blocked at the most negative value. +* **FAIL**: in this mode no operation is performed on overflows or underflows detected. The corresponding return value is set to NULL to signal the condition to the caller. Note that each `OVERFLOW` statement only affects the `!INCRBY` and `!SET` commands that follow it in the list of subcommands, up to the next `OVERFLOW` @@ -124,33 +92,25 @@ The following is an example of `OVERFLOW FAIL` returning NULL. ## Motivations The motivation for this command is that the ability to store many small integers -as a single large bitmap (or segmented over a few keys to avoid having huge -keys) is extremely memory efficient, and opens new use cases for Redis to be -applied, especially in the field of real time analytics. This use cases are -supported by the ability to specify the overflow in a controlled way. 
+as a single large bitmap (or segmented over a few keys to avoid having huge keys) is extremely memory efficient, and opens new use cases for Redis to be applied, especially in the field of real time analytics. These use cases are supported by the ability to specify the overflow in a controlled way. -Fun fact: Reddit's 2017 April fools' project -[r/place](https://reddit.com/r/place) was -[built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) -in order to take an in-memory representation of the collaborative canvas. +Fun fact: Reddit's 2017 April fools' project [r/place](https://reddit.com/r/place) was [built using the Redis BITFIELD command](https://redditblog.com/2017/04/13/how-we-built-rplace/) in order to take an in-memory representation of the collaborative canvas. ## Performance considerations -Usually `BITFIELD` is a fast command, however note that addressing far bits of -currently short strings will trigger an allocation that may be more costly than -executing the command on bits already existing.
## Orders of bits -The representation used by `BITFIELD` considers the bitmap as having the bit -number 0 to be the most significant bit of the first byte, and so forth, so for -example setting a 5 bits unsigned integer to value 23 at offset 7 into a bitmap -previously set to all zeroes, will produce the following representation: +The representation used by `BITFIELD` considers the bitmap as having the +bit number 0 to be the most significant bit of the first byte, and so forth, so +for example setting a 5 bits unsigned integer to value 23 at offset 7 into a +bitmap previously set to all zeroes, will produce the following representation: +--------+--------+ |00000001|01110000| +--------+--------+ -When offsets and integer sizes are aligned to bytes boundaries, this is the same -as big endian, however when such alignment does not exist, its important to also -understand how the bits inside a byte are ordered. +When offsets and integer sizes are aligned to bytes boundaries, this is the +same as big endian, however when such alignment does not exist, it's important +to also understand how the bits inside a byte are ordered. diff --git a/iredis/data/commands/bitfield_ro.md b/iredis/data/commands/bitfield_ro.md new file mode 100644 index 0000000..94057a1 --- /dev/null +++ b/iredis/data/commands/bitfield_ro.md @@ -0,0 +1,19 @@ +Read-only variant of the `BITFIELD` command. +It is like the original `BITFIELD` but only accepts `!GET` subcommand and can safely be used in read-only replicas. + +Since the original `BITFIELD` has `!SET` and `!INCRBY` options it is technically flagged as a writing command in the Redis command table. +For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster).
+ +Since Redis 6.2, the `BITFIELD_RO` variant was introduced in order to allow `BITFIELD` behavior in read-only replicas without breaking compatibility on command flags. + +See original `BITFIELD` for more details. + +@examples + +``` +BITFIELD_RO hello GET i8 16 +``` + +@return + +@array-reply: An array with each entry being the corresponding result of the subcommand given at the same position. diff --git a/iredis/data/commands/bitop.md b/iredis/data/commands/bitop.md index 656befa..d35c756 100644 --- a/iredis/data/commands/bitop.md +++ b/iredis/data/commands/bitop.md @@ -4,13 +4,14 @@ store the result in the destination key. The `BITOP` command supports four bitwise operations: **AND**, **OR**, **XOR** and **NOT**, thus the valid forms to call the command are: -- `BITOP AND destkey srckey1 srckey2 srckey3 ... srckeyN` -- `BITOP OR destkey srckey1 srckey2 srckey3 ... srckeyN` -- `BITOP XOR destkey srckey1 srckey2 srckey3 ... srckeyN` -- `BITOP NOT destkey srckey` + +* `BITOP AND destkey srckey1 srckey2 srckey3 ... srckeyN` +* `BITOP OR destkey srckey1 srckey2 srckey3 ... srckeyN` +* `BITOP XOR destkey srckey1 srckey2 srckey3 ... srckeyN` +* `BITOP NOT destkey srckey` As you can see **NOT** is special as it only takes an input key, because it -performs inversion of bits so it only makes sense as an unary operator. +performs inversion of bits so it only makes sense as a unary operator. The result of the operation is always stored at `destkey`. @@ -27,8 +28,8 @@ zero bytes up to the length of the longest string. @integer-reply -The size of the string stored in the destination key, that is equal to the size -of the longest input string. +The size of the string stored in the destination key, that is equal to the +size of the longest input string. @examples @@ -42,20 +43,20 @@ GET dest ## Pattern: real time metrics using bitmaps `BITOP` is a good complement to the pattern documented in the `BITCOUNT` command -documentation. 
Different bitmaps can be combined in order to obtain a target -bitmap where the population counting operation is performed. +documentation. +Different bitmaps can be combined in order to obtain a target bitmap where +the population counting operation is performed. See the article called "[Fast easy realtime metrics using Redis -bitmaps][hbgc212fermurb]" for a interesting use cases. +bitmaps][hbgc212fermurb]" for an interesting use cases. -[hbgc212fermurb]: - http://blog.getspool.com/2011/11/29/fast-easy-realtime-metrics-using-redis-bitmaps +[hbgc212fermurb]: http://blog.getspool.com/2011/11/29/fast-easy-realtime-metrics-using-redis-bitmaps ## Performance considerations -`BITOP` is a potentially slow command as it runs in O(N) time. Care should be -taken when running it against long input strings. +`BITOP` is a potentially slow command as it runs in O(N) time. +Care should be taken when running it against long input strings. For real-time metrics and statistics involving large inputs a good approach is -to use a replica (with read-only option disabled) where the bit-wise operations -are performed to avoid blocking the master instance. +to use a replica (with read-only option disabled) where the bit-wise +operations are performed to avoid blocking the master instance. diff --git a/iredis/data/commands/bitpos.md b/iredis/data/commands/bitpos.md index 56dc588..1016941 100644 --- a/iredis/data/commands/bitpos.md +++ b/iredis/data/commands/bitpos.md @@ -1,25 +1,24 @@ Return the position of the first bit set to 1 or 0 in a string. -The position is returned, thinking of the string as an array of bits from left -to right, where the first byte's most significant bit is at position 0, the -second byte's most significant bit is at position 8, and so forth. +The position is returned, thinking of the string as an array of bits from left to +right, where the first byte's most significant bit is at position 0, the second +byte's most significant bit is at position 8, and so forth. 
The same bit position convention is followed by `GETBIT` and `SETBIT`. -By default, all the bytes contained in the string are examined. It is possible -to look for bits only in a specified interval passing the additional arguments -_start_ and _end_ (it is possible to just pass _start_, the operation will -assume that the end is the last byte of the string. However there are semantic -differences as explained later). The range is interpreted as a range of bytes -and not a range of bits, so `start=0` and `end=2` means to look at the first -three bytes. +By default, all the bytes contained in the string are examined. +It is possible to look for bits only in a specified interval passing the additional arguments _start_ and _end_ (it is possible to just pass _start_, the operation will assume that the end is the last byte of the string. However there are semantic differences as explained later). +By default, the range is interpreted as a range of bytes and not a range of bits, so `start=0` and `end=2` means to look at the first three bytes. -Note that bit positions are returned always as absolute values starting from bit -zero even when _start_ and _end_ are used to specify a range. +You can use the optional `BIT` modifier to specify that the range should be interpreted as a range of bits. +So `start=0` and `end=2` means to look at the first three bits. + +Note that bit positions are returned always as absolute values starting from bit zero even when _start_ and _end_ are used to specify a range. Like for the `GETRANGE` command start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last -byte, -2 is the penultimate, and so forth. +byte, -2 is the penultimate, and so forth. When `BIT` is specified, -1 is the last +bit, -2 is the penultimate, and so forth. Non-existent keys are treated as empty strings. @@ -27,25 +26,15 @@ Non-existent keys are treated as empty strings. 
@integer-reply -The command returns the position of the first bit set to 1 or 0 according to the -request. +The command returns the position of the first bit set to 1 or 0 according to the request. -If we look for set bits (the bit argument is 1) and the string is empty or -composed of just zero bytes, -1 is returned. +If we look for set bits (the bit argument is 1) and the string is empty or composed of just zero bytes, -1 is returned. -If we look for clear bits (the bit argument is 0) and the string only contains -bit set to 1, the function returns the first bit not part of the string on the -right. So if the string is three bytes set to the value `0xff` the command -`BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. +If we look for clear bits (the bit argument is 0) and the string only contains bit set to 1, the function returns the first bit not part of the string on the right. So if the string is three bytes set to the value `0xff` the command `BITPOS key 0` will return 24, since up to bit 23 all the bits are 1. -Basically, the function considers the right of the string as padded with zeros -if you look for clear bits and specify no range or the _start_ argument -**only**. +Basically, the function considers the right of the string as padded with zeros if you look for clear bits and specify no range or the _start_ argument **only**. -However, this behavior changes if you are looking for clear bits and specify a -range with both **start** and **end**. If no clear bit is found in the specified -range, the function returns -1 as the user specified a clear range and there are -no 0 bits in that range. +However, this behavior changes if you are looking for clear bits and specify a range with both __start__ and __end__. If no clear bit is found in the specified range, the function returns -1 as the user specified a clear range and there are no 0 bits in that range. 
@examples @@ -55,6 +44,9 @@ BITPOS mykey 0 SET mykey "\x00\xff\xf0" BITPOS mykey 1 0 BITPOS mykey 1 2 +BITPOS mykey 1 2 -1 BYTE +BITPOS mykey 1 7 15 BIT set mykey "\x00\x00\x00" BITPOS mykey 1 +BITPOS mykey 1 7 -3 BIT ``` diff --git a/iredis/data/commands/blmove.md b/iredis/data/commands/blmove.md index 3edf4e5..463a2dc 100644 --- a/iredis/data/commands/blmove.md +++ b/iredis/data/commands/blmove.md @@ -1,8 +1,9 @@ -`BLMOVE` is the blocking variant of `LMOVE`. When `source` contains elements, -this command behaves exactly like `LMOVE`. When used inside a `MULTI`/`EXEC` -block, this command behaves exactly like `LMOVE`. When `source` is empty, Redis -will block the connection until another client pushes to it or until `timeout` -is reached. A `timeout` of zero can be used to block indefinitely. +`BLMOVE` is the blocking variant of `LMOVE`. +When `source` contains elements, this command behaves exactly like `LMOVE`. +When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `LMOVE`. +When `source` is empty, Redis will block the connection until another client +pushes to it or until `timeout` (a double value specifying the maximum number of seconds to block) is reached. +A `timeout` of zero can be used to block indefinitely. This command comes in place of the now deprecated `BRPOPLPUSH`. Doing `BLMOVE RIGHT LEFT` is equivalent. @@ -11,8 +12,8 @@ See `LMOVE` for more information. @return -@bulk-string-reply: the element being popped from `source` and pushed to -`destination`. If `timeout` is reached, a @nil-reply is returned. +@bulk-string-reply: the element being popped from `source` and pushed to `destination`. +If `timeout` is reached, a @nil-reply is returned. ## Pattern: Reliable queue diff --git a/iredis/data/commands/blmpop.md b/iredis/data/commands/blmpop.md new file mode 100644 index 0000000..262713e --- /dev/null +++ b/iredis/data/commands/blmpop.md @@ -0,0 +1,15 @@ +`BLMPOP` is the blocking variant of `LMPOP`. 
+ +When any of the lists contains elements, this command behaves exactly like `LMPOP`. +When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `LMPOP`. +When all lists are empty, Redis will block the connection until another client pushes to it or until the `timeout` (a double value specifying the maximum number of seconds to block) elapses. +A `timeout` of zero can be used to block indefinitely. + +See `LMPOP` for more information. + +@return + +@array-reply: specifically: + +* A `nil` when no element could be popped, and timeout is reached. +* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of elements. diff --git a/iredis/data/commands/blpop.md b/iredis/data/commands/blpop.md index e44037e..1d73fff 100644 --- a/iredis/data/commands/blpop.md +++ b/iredis/data/commands/blpop.md @@ -1,7 +1,8 @@ -`BLPOP` is a blocking list pop primitive. It is the blocking version of `LPOP` -because it blocks the connection when there are no elements to pop from any of -the given lists. An element is popped from the head of the first list that is -non-empty, with the given keys being checked in the order that they are given. +`BLPOP` is a blocking list pop primitive. +It is the blocking version of `LPOP` because it blocks the connection when there +are no elements to pop from any of the given lists. +An element is popped from the head of the first list that is non-empty, with the +given keys being checked in the order that they are given. ## Non-blocking behavior @@ -9,9 +10,10 @@ When `BLPOP` is called, if at least one of the specified keys contains a non-empty list, an element is popped from the head of the list and returned to the caller together with the `key` it was popped from. -Keys are checked in the order that they are given. Let's say that the key -`list1` doesn't exist and `list2` and `list3` hold non-empty lists. 
Consider the -following command: +Keys are checked in the order that they are given. +Let's say that the key `list1` doesn't exist and `list2` and `list3` hold +non-empty lists. +Consider the following command: ``` BLPOP list1 list2 list3 0 @@ -29,93 +31,53 @@ client performs an `LPUSH` or `RPUSH` operation against one of the keys. Once new data is present on one of the lists, the client returns with the name of the key unblocking it and the popped value. -When `BLPOP` causes a client to block and a non-zero timeout is specified, the -client will unblock returning a `nil` multi-bulk value when the specified +When `BLPOP` causes a client to block and a non-zero timeout is specified, +the client will unblock returning a `nil` multi-bulk value when the specified timeout has expired without a push operation against at least one of the specified keys. -**The timeout argument is interpreted as a double value specifying the maximum -number of seconds to block**. A timeout of zero can be used to block -indefinitely. +**The timeout argument is interpreted as a double value specifying the maximum number of seconds to block**. A timeout of zero can be used to block indefinitely. ## What key is served first? What client? What element? Priority ordering details. -- If the client tries to blocks for multiple keys, but at least one key contains - elements, the returned key / element pair is the first key from left to right - that has one or more elements. In this case the client is not blocked. So for - instance `BLPOP key1 key2 key3 key4 0`, assuming that both `key2` and `key4` - are non-empty, will always return an element from `key2`. -- If multiple clients are blocked for the same key, the first client to be - served is the one that was waiting for more time (the first that blocked for - the key). 
Once a client is unblocked it does not retain any priority, when it - blocks again with the next call to `BLPOP` it will be served accordingly to - the number of clients already blocked for the same key, that will all be - served before it (from the first to the last that blocked). -- When a client is blocking for multiple keys at the same time, and elements are - available at the same time in multiple keys (because of a transaction or a Lua - script added elements to multiple lists), the client will be unblocked using - the first key that received a push operation (assuming it has enough elements - to serve our client, as there may be other clients as well waiting for this - key). Basically after the execution of every command Redis will run a list of - all the keys that received data AND that have at least a client blocked. The - list is ordered by new element arrival time, from the first key that received - data to the last. For every key processed, Redis will serve all the clients - waiting for that key in a FIFO fashion, as long as there are elements in this - key. When the key is empty or there are no longer clients waiting for this - key, the next key that received new data in the previous command / transaction - / script is processed, and so forth. +* If the client tries to blocks for multiple keys, but at least one key contains elements, the returned key / element pair is the first key from left to right that has one or more elements. In this case the client is not blocked. So for instance `BLPOP key1 key2 key3 key4 0`, assuming that both `key2` and `key4` are non-empty, will always return an element from `key2`. +* If multiple clients are blocked for the same key, the first client to be served is the one that was waiting for more time (the first that blocked for the key). 
Once a client is unblocked it does not retain any priority, when it blocks again with the next call to `BLPOP` it will be served accordingly to the number of clients already blocked for the same key, that will all be served before it (from the first to the last that blocked). +* When a client is blocking for multiple keys at the same time, and elements are available at the same time in multiple keys (because of a transaction or a Lua script added elements to multiple lists), the client will be unblocked using the first key that received a push operation (assuming it has enough elements to serve our client, as there may be other clients as well waiting for this key). Basically after the execution of every command Redis will run a list of all the keys that received data AND that have at least a client blocked. The list is ordered by new element arrival time, from the first key that received data to the last. For every key processed, Redis will serve all the clients waiting for that key in a FIFO fashion, as long as there are elements in this key. When the key is empty or there are no longer clients waiting for this key, the next key that received new data in the previous command / transaction / script is processed, and so forth. ## Behavior of `!BLPOP` when multiple elements are pushed inside a list. -There are times when a list can receive multiple elements in the context of the -same conceptual command: +There are times when a list can receive multiple elements in the context of the same conceptual command: -- Variadic push operations such as `LPUSH mylist a b c`. -- After an `EXEC` of a `MULTI` block with multiple push operations against the - same list. -- Executing a Lua Script with Redis 2.6 or newer. +* Variadic push operations such as `LPUSH mylist a b c`. +* After an `EXEC` of a `MULTI` block with multiple push operations against the same list. +* Executing a Lua Script with Redis 2.6 or newer. 
-When multiple elements are pushed inside a list where there are clients -blocking, the behavior is different for Redis 2.4 and Redis 2.6 or newer. +When multiple elements are pushed inside a list where there are clients blocking, the behavior is different for Redis 2.4 and Redis 2.6 or newer. -For Redis 2.6 what happens is that the command performing multiple pushes is -executed, and _only after_ the execution of the command the blocked clients are -served. Consider this sequence of commands. +For Redis 2.6 what happens is that the command performing multiple pushes is executed, and *only after* the execution of the command the blocked clients are served. Consider this sequence of commands. Client A: BLPOP foo 0 Client B: LPUSH foo a b c -If the above condition happens using a Redis 2.6 server or greater, Client **A** -will be served with the `c` element, because after the `LPUSH` command the list -contains `c,b,a`, so taking an element from the left means to return `c`. +If the above condition happens using a Redis 2.6 server or greater, Client **A** will be served with the `c` element, because after the `LPUSH` command the list contains `c,b,a`, so taking an element from the left means to return `c`. -Instead Redis 2.4 works in a different way: clients are served _in the context_ -of the push operation, so as long as `LPUSH foo a b c` starts pushing the first -element to the list, it will be delivered to the Client **A**, that will receive -`a` (the first element pushed). +Instead Redis 2.4 works in a different way: clients are served *in the context* of the push operation, so as long as `LPUSH foo a b c` starts pushing the first element to the list, it will be delivered to the Client **A**, that will receive `a` (the first element pushed). -The behavior of Redis 2.4 creates a lot of problems when replicating or -persisting data into the AOF file, so the much more generic and semantically -simpler behavior was introduced into Redis 2.6 to prevent problems. 
+The behavior of Redis 2.4 creates a lot of problems when replicating or persisting data into the AOF file, so the much more generic and semantically simpler behavior was introduced into Redis 2.6 to prevent problems. -Note that for the same reason a Lua script or a `MULTI/EXEC` block may push -elements into a list and afterward **delete the list**. In this case the blocked -clients will not be served at all and will continue to be blocked as long as no -data is present on the list after the execution of a single command, -transaction, or script. +Note that for the same reason a Lua script or a `MULTI/EXEC` block may push elements into a list and afterward **delete the list**. In this case the blocked clients will not be served at all and will continue to be blocked as long as no data is present on the list after the execution of a single command, transaction, or script. ## `!BLPOP` inside a `!MULTI` / `!EXEC` transaction -`BLPOP` can be used with pipelining (sending multiple commands and reading the -replies in batch), however this setup makes sense almost solely when it is the -last command of the pipeline. +`BLPOP` can be used with pipelining (sending multiple commands and +reading the replies in batch), however this setup makes sense almost solely +when it is the last command of the pipeline. -Using `BLPOP` inside a `MULTI` / `EXEC` block does not make a lot of sense as it -would require blocking the entire server in order to execute the block +Using `BLPOP` inside a `MULTI` / `EXEC` block does not make a lot of sense +as it would require blocking the entire server in order to execute the block atomically, which in turn does not allow other clients to perform a push -operation. For this reason the behavior of `BLPOP` inside `MULTI` / `EXEC` when -the list is empty is to return a `nil` multi-bulk reply, which is the same thing -that happens when the timeout is reached. +operation. 
For this reason the behavior of `BLPOP` inside `MULTI` / `EXEC` when the list is empty is to return a `nil` multi-bulk reply, which is the same +thing that happens when the timeout is reached. If you like science fiction, think of time flowing at infinite speed inside a `MULTI` / `EXEC` block... @@ -124,15 +86,11 @@ If you like science fiction, think of time flowing at infinite speed inside a @array-reply: specifically: -- A `nil` multi-bulk when no element could be popped and the timeout expired. -- A two-element multi-bulk with the first element being the name of the key +* A `nil` multi-bulk when no element could be popped and the timeout expired. +* A two-element multi-bulk with the first element being the name of the key where an element was popped and the second element being the value of the popped element. -@history - -- `>= 6.0`: `timeout` is interpreted as a double instead of an integer. - @examples ``` @@ -147,23 +105,19 @@ redis> BLPOP list1 list2 0 ## Reliable queues -When `BLPOP` returns an element to the client, it also removes the element from -the list. This means that the element only exists in the context of the client: -if the client crashes while processing the returned element, it is lost forever. +When `BLPOP` returns an element to the client, it also removes the element from the list. This means that the element only exists in the context of the client: if the client crashes while processing the returned element, it is lost forever. -This can be a problem with some application where we want a more reliable -messaging system. When this is the case, please check the `BRPOPLPUSH` command, -that is a variant of `BLPOP` that adds the returned element to a target list -before returning it to the client. +This can be a problem with some application where we want a more reliable messaging system. 
When this is the case, please check the `BRPOPLPUSH` command, that is a variant of `BLPOP` that adds the returned element to a target list before returning it to the client. ## Pattern: Event notification Using blocking list operations it is possible to mount different blocking -primitives. For instance for some application you may need to block waiting for -elements into a Redis Set, so that as far as a new element is added to the Set, -it is possible to retrieve it without resort to polling. This would require a -blocking version of `SPOP` that is not available, but using blocking list -operations we can easily accomplish this task. +primitives. +For instance for some application you may need to block waiting for elements +into a Redis Set, so that as far as a new element is added to the Set, it is +possible to retrieve it without resort to polling. +This would require a blocking version of `SPOP` that is not available, but using +blocking list operations we can easily accomplish this task. The consumer will do: diff --git a/iredis/data/commands/brpop.md b/iredis/data/commands/brpop.md index ca7df79..dfa2b91 100644 --- a/iredis/data/commands/brpop.md +++ b/iredis/data/commands/brpop.md @@ -1,7 +1,8 @@ -`BRPOP` is a blocking list pop primitive. It is the blocking version of `RPOP` -because it blocks the connection when there are no elements to pop from any of -the given lists. An element is popped from the tail of the first list that is -non-empty, with the given keys being checked in the order that they are given. +`BRPOP` is a blocking list pop primitive. +It is the blocking version of `RPOP` because it blocks the connection when there +are no elements to pop from any of the given lists. +An element is popped from the tail of the first list that is non-empty, with the +given keys being checked in the order that they are given. 
See the [BLPOP documentation][cb] for the exact semantics, since `BRPOP` is identical to `BLPOP` with the only difference being that it pops elements from @@ -13,15 +14,11 @@ the tail of a list instead of popping from the head. @array-reply: specifically: -- A `nil` multi-bulk when no element could be popped and the timeout expired. -- A two-element multi-bulk with the first element being the name of the key +* A `nil` multi-bulk when no element could be popped and the timeout expired. +* A two-element multi-bulk with the first element being the name of the key where an element was popped and the second element being the value of the popped element. -@history - -- `>= 6.0`: `timeout` is interpreted as a double instead of an integer. - @examples ``` diff --git a/iredis/data/commands/brpoplpush.md b/iredis/data/commands/brpoplpush.md index b37183f..9a6fe37 100644 --- a/iredis/data/commands/brpoplpush.md +++ b/iredis/data/commands/brpoplpush.md @@ -1,23 +1,16 @@ -`BRPOPLPUSH` is the blocking variant of `RPOPLPUSH`. When `source` contains -elements, this command behaves exactly like `RPOPLPUSH`. When used inside a -`MULTI`/`EXEC` block, this command behaves exactly like `RPOPLPUSH`. When -`source` is empty, Redis will block the connection until another client pushes -to it or until `timeout` is reached. A `timeout` of zero can be used to block -indefinitely. - -As per Redis 6.2.0, BRPOPLPUSH is considered deprecated. Please prefer `BLMOVE` -in new code. +`BRPOPLPUSH` is the blocking variant of `RPOPLPUSH`. +When `source` contains elements, this command behaves exactly like `RPOPLPUSH`. +When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `RPOPLPUSH`. +When `source` is empty, Redis will block the connection until another client +pushes to it or until `timeout` is reached. +A `timeout` of zero can be used to block indefinitely. See `RPOPLPUSH` for more information. 
@return -@bulk-string-reply: the element being popped from `source` and pushed to -`destination`. If `timeout` is reached, a @nil-reply is returned. - -@history - -- `>= 6.0`: `timeout` is interpreted as a double instead of an integer. +@bulk-string-reply: the element being popped from `source` and pushed to `destination`. +If `timeout` is reached, a @nil-reply is returned. ## Pattern: Reliable queue diff --git a/iredis/data/commands/bzmpop.md b/iredis/data/commands/bzmpop.md new file mode 100644 index 0000000..dc0c077 --- /dev/null +++ b/iredis/data/commands/bzmpop.md @@ -0,0 +1,16 @@ +`BZMPOP` is the blocking variant of `ZMPOP`. + +When any of the sorted sets contains elements, this command behaves exactly like `ZMPOP`. +When used inside a `MULTI`/`EXEC` block, this command behaves exactly like `ZMPOP`. +When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the `timeout` (a double value specifying the maximum number of seconds to block) elapses. +A `timeout` of zero can be used to block indefinitely. + +See `ZMPOP` for more information. + +@return + +@array-reply: specifically: + +* A `nil` when no element could be popped. +* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score. + diff --git a/iredis/data/commands/bzpopmax.md b/iredis/data/commands/bzpopmax.md index 6ab6543..8155ed8 100644 --- a/iredis/data/commands/bzpopmax.md +++ b/iredis/data/commands/bzpopmax.md @@ -1,9 +1,9 @@ `BZPOPMAX` is the blocking variant of the sorted set `ZPOPMAX` primitive. It is the blocking version because it blocks the connection when there are no -members to pop from any of the given sorted sets. 
A member with the highest -score is popped from first sorted set that is non-empty, with the given keys -being checked in the order that they are given. +members to pop from any of the given sorted sets. +A member with the highest score is popped from first sorted set that is +non-empty, with the given keys being checked in the order that they are given. The `timeout` argument is interpreted as a double value specifying the maximum number of seconds to block. A timeout of zero can be used to block indefinitely. @@ -18,14 +18,10 @@ with the highest scores instead of popping the ones with the lowest scores. @array-reply: specifically: -- A `nil` multi-bulk when no element could be popped and the timeout expired. -- A three-element multi-bulk with the first element being the name of the key - where a member was popped, the second element is the popped member itself, and - the third element is the score of the popped element. - -@history - -- `>= 6.0`: `timeout` is interpreted as a double instead of an integer. +* A `nil` multi-bulk when no element could be popped and the timeout expired. +* A three-element multi-bulk with the first element being the name of the key + where a member was popped, the second element is the popped member itself, + and the third element is the score of the popped element. @examples diff --git a/iredis/data/commands/bzpopmin.md b/iredis/data/commands/bzpopmin.md index 5541726..b48a4fb 100644 --- a/iredis/data/commands/bzpopmin.md +++ b/iredis/data/commands/bzpopmin.md @@ -1,11 +1,11 @@ `BZPOPMIN` is the blocking variant of the sorted set `ZPOPMIN` primitive. It is the blocking version because it blocks the connection when there are no -members to pop from any of the given sorted sets. A member with the lowest score -is popped from first sorted set that is non-empty, with the given keys being -checked in the order that they are given. +members to pop from any of the given sorted sets. 
+A member with the lowest score is popped from first sorted set that is +non-empty, with the given keys being checked in the order that they are given. -The `timeout` argument is interpreted as an double value specifying the maximum +The `timeout` argument is interpreted as a double value specifying the maximum number of seconds to block. A timeout of zero can be used to block indefinitely. See the [BLPOP documentation][cl] for the exact semantics, since `BZPOPMIN` is @@ -18,14 +18,10 @@ popped from. @array-reply: specifically: -- A `nil` multi-bulk when no element could be popped and the timeout expired. -- A three-element multi-bulk with the first element being the name of the key - where a member was popped, the second element is the popped member itself, and - the third element is the score of the popped element. - -@history - -- `>= 6.0`: `timeout` is interpreted as a double instead of an integer. +* A `nil` multi-bulk when no element could be popped and the timeout expired. +* A three-element multi-bulk with the first element being the name of the key + where a member was popped, the second element is the popped member itself, + and the third element is the score of the popped element. @examples diff --git a/iredis/data/commands/client-caching.md b/iredis/data/commands/client-caching.md index 13346e3..1f4b8b8 100644 --- a/iredis/data/commands/client-caching.md +++ b/iredis/data/commands/client-caching.md @@ -1,18 +1,21 @@ -This command controls the tracking of the keys in the next command executed by -the connection, when tracking is enabled in `OPTIN` or `OPTOUT` mode. Please -check the [client side caching documentation](/topics/client-side-caching) for +This command controls the tracking of the keys in the next command executed +by the connection, when tracking is enabled in `OPTIN` or `OPTOUT` mode. +Please check the +[client side caching documentation](/topics/client-side-caching) for background information. 
When tracking is enabled Redis, using the `CLIENT TRACKING` command, it is -possible to specify the `OPTIN` or `OPTOUT` options, so that keys in read only -commands are not automatically remembered by the server to be invalidated later. -When we are in `OPTIN` mode, we can enable the tracking of the keys in the next -command by calling `CLIENT CACHING yes` immediately before it. Similarly when we -are in `OPTOUT` mode, and keys are normally tracked, we can avoid the keys in -the next command to be tracked using `CLIENT CACHING no`. +possible to specify the `OPTIN` or `OPTOUT` options, so that keys +in read only commands are not automatically remembered by the server to +be invalidated later. When we are in `OPTIN` mode, we can enable the +tracking of the keys in the next command by calling `CLIENT CACHING yes` +immediately before it. Similarly when we are in `OPTOUT` mode, and keys +are normally tracked, we can avoid the keys in the next command to be +tracked using `CLIENT CACHING no`. -Basically the command sets a state in the connection, that is valid only for the -next command execution, that will modify the behavior of client tracking. +Basically the command sets a state in the connection, that is valid only +for the next command execution, that will modify the behavior of client +tracking. @return diff --git a/iredis/data/commands/client-getname.md b/iredis/data/commands/client-getname.md index e91d8ae..f60539d 100644 --- a/iredis/data/commands/client-getname.md +++ b/iredis/data/commands/client-getname.md @@ -1,6 +1,4 @@ -The `CLIENT GETNAME` returns the name of the current connection as set by -`CLIENT SETNAME`. Since every new connection starts without an associated name, -if no name was assigned a null bulk reply is returned. +The `CLIENT GETNAME` returns the name of the current connection as set by `CLIENT SETNAME`. Since every new connection starts without an associated name, if no name was assigned a null bulk reply is returned. 
@return diff --git a/iredis/data/commands/client-getredir.md b/iredis/data/commands/client-getredir.md index ddaa1b7..2cc3269 100644 --- a/iredis/data/commands/client-getredir.md +++ b/iredis/data/commands/client-getredir.md @@ -1,13 +1,11 @@ This command returns the client ID we are redirecting our -[tracking](/topics/client-side-caching) notifications to. We set a client to -redirect to when using `CLIENT TRACKING` to enable tracking. However in order to -avoid forcing client libraries implementations to remember the ID notifications -are redirected to, this command exists in order to improve introspection and -allow clients to check later if redirection is active and towards which client -ID. +[tracking](/topics/client-side-caching) notifications to. We set a client +to redirect to when using `CLIENT TRACKING` to enable tracking. However in +order to avoid forcing client libraries implementations to remember the +ID notifications are redirected to, this command exists in order to improve +introspection and allow clients to check later if redirection is active +and towards which client ID. @return -@integer-reply: the ID of the client we are redirecting the notifications to. -The command returns `-1` if client tracking is not enabled, or `0` if client -tracking is enabled but we are not redirecting the notifications to any client. +@integer-reply: the ID of the client we are redirecting the notifications to. The command returns `-1` if client tracking is not enabled, or `0` if client tracking is enabled but we are not redirecting the notifications to any client. diff --git a/iredis/data/commands/client-help.md b/iredis/data/commands/client-help.md new file mode 100644 index 0000000..964a625 --- /dev/null +++ b/iredis/data/commands/client-help.md @@ -0,0 +1,5 @@ +The `CLIENT HELP` command returns a helpful text describing the different subcommands. 
+ +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/client-id.md b/iredis/data/commands/client-id.md index d242d6f..fe6723c 100644 --- a/iredis/data/commands/client-id.md +++ b/iredis/data/commands/client-id.md @@ -1,16 +1,11 @@ -The command just returns the ID of the current connection. Every connection ID -has certain guarantees: +The command just returns the ID of the current connection. Every connection +ID has certain guarantees: -1. It is never repeated, so if `CLIENT ID` returns the same number, the caller - can be sure that the underlying client did not disconnect and reconnect the - connection, but it is still the same connection. -2. The ID is monotonically incremental. If the ID of a connection is greater - than the ID of another connection, it is guaranteed that the second - connection was established with the server at a later time. +1. It is never repeated, so if `CLIENT ID` returns the same number, the caller can be sure that the underlying client did not disconnect and reconnect the connection, but it is still the same connection. +2. The ID is monotonically incremental. If the ID of a connection is greater than the ID of another connection, it is guaranteed that the second connection was established with the server at a later time. This command is especially useful together with `CLIENT UNBLOCK` which was -introduced also in Redis 5 together with `CLIENT ID`. Check the `CLIENT UNBLOCK` -command page for a pattern involving the two commands. +introduced also in Redis 5 together with `CLIENT ID`. Check the `CLIENT UNBLOCK` command page for a pattern involving the two commands. 
@examples diff --git a/iredis/data/commands/client-info.md b/iredis/data/commands/client-info.md index ab64543..f60592e 100644 --- a/iredis/data/commands/client-info.md +++ b/iredis/data/commands/client-info.md @@ -1,8 +1,6 @@ -The command returns information and statistics about the current client -connection in a mostly human readable format. +The command returns information and statistics about the current client connection in a mostly human readable format. -The reply format is identical to that of `CLIENT LIST`, and the content consists -only of information about the current client. +The reply format is identical to that of `CLIENT LIST`, and the content consists only of information about the current client. @examples @@ -12,5 +10,4 @@ CLIENT INFO @return -@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for -the current client. +@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for the current client. diff --git a/iredis/data/commands/client-kill.md b/iredis/data/commands/client-kill.md index 1b47313..ea65aaf 100644 --- a/iredis/data/commands/client-kill.md +++ b/iredis/data/commands/client-kill.md @@ -1,60 +1,45 @@ -The `CLIENT KILL` command closes a given client connection. This command support -two formats, the old format: +The `CLIENT KILL` command closes a given client connection. This command support two formats, the old format: CLIENT KILL addr:port -The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr` -field). +The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr` field). The new format: CLIENT KILL <filter> <value> ... ... <filter> <value> -With the new form it is possible to kill clients by different attributes instead -of killing just by address. The following filters are available: - -- `CLIENT KILL ADDR ip:port`. This is exactly the same as the old - three-arguments behavior. -- `CLIENT KILL LADDR ip:port`. 
Kill all clients connected to specified local - (bind) address. -- `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field. - Client `ID`'s are retrieved using the `CLIENT LIST` command. -- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `replica` - and `pubsub`. This closes the connections of **all the clients** in the - specified class. Note that clients blocked into the `MONITOR` command are - considered to belong to the `normal` class. -- `CLIENT KILL USER username`. Closes all the connections that are authenticated - with the specified [ACL](/topics/acl) username, however it returns an error if - the username does not map to an existing ACL user. -- `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, - the client calling the command will not get killed, however setting this - option to `no` will have the effect of also killing the client calling the - command. - -It is possible to provide multiple filters at the same time. The command will -handle multiple filters via logical AND. For example: +With the new form it is possible to kill clients by different attributes +instead of killing just by address. The following filters are available: + +* `CLIENT KILL ADDR ip:port`. This is exactly the same as the old three-arguments behavior. +* `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local (bind) address. +* `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field. Client `ID`'s are retrieved using the `CLIENT LIST` command. +* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `replica` and `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the `MONITOR` command are considered to belong to the `normal` class. +* `CLIENT KILL USER username`. 
Closes all the connections that are authenticated with the specified [ACL](/topics/acl) username, however it returns an error if the username does not map to an existing ACL user. +* `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command. + +It is possible to provide multiple filters at the same time. The command will handle multiple filters via logical AND. For example: CLIENT KILL addr 127.0.0.1:12345 type pubsub -is valid and will kill only a pubsub client with the specified address. This -format containing multiple filters is rarely useful currently. +is valid and will kill only a pubsub client with the specified address. This format containing multiple filters is rarely useful currently. -When the new form is used the command no longer returns `OK` or an error, but -instead the number of killed clients, that may be zero. +When the new form is used the command no longer returns `OK` or an error, but instead the number of killed clients, that may be zero. ## CLIENT KILL and Redis Sentinel -Recent versions of Redis Sentinel (Redis 2.8.12 or greater) use CLIENT KILL in -order to kill clients when an instance is reconfigured, in order to force -clients to perform the handshake with one Sentinel again and update its -configuration. +Recent versions of Redis Sentinel (Redis 2.8.12 or greater) use CLIENT KILL +in order to kill clients when an instance is reconfigured, in order to +force clients to perform the handshake with one Sentinel again and update +its configuration. ## Notes -Due to the single-threaded nature of Redis, it is not possible to kill a client -connection while it is executing a command. From the client point of view, the -connection can never be closed in the middle of the execution of a command. 
-However, the client will notice the connection has been closed only when the +Due to the single-threaded nature of Redis, it is not possible to +kill a client connection while it is executing a command. From +the client point of view, the connection can never be closed +in the middle of the execution of a command. However, the client +will notice the connection has been closed only when the next command is sent (and results in network error). @return @@ -66,12 +51,3 @@ When called with the three arguments format: When called with the filter / value format: @integer-reply: the number of clients killed. - -@history - -- `>= 2.8.12`: Added new filter format. -- `>= 2.8.12`: `ID` option. -- `>= 3.2`: Added `master` type in for `TYPE` option. -- `>= 5`: Replaced `slave` `TYPE` with `replica`. `slave` still supported for - backward compatibility. -- `>= 6.2`: `LADDR` option. diff --git a/iredis/data/commands/client-list.md b/iredis/data/commands/client-list.md index 7956f2b..6241425 100644 --- a/iredis/data/commands/client-list.md +++ b/iredis/data/commands/client-list.md @@ -1,49 +1,46 @@ The `CLIENT LIST` command returns information and statistics about the client connections server in a mostly human readable format. -You can use one of the optional subcommands to filter the list. The `TYPE type` -subcommand filters the list by clients' type, where _type_ is one of `normal`, -`master`, `replica`, and `pubsub`. Note that clients blocked by the `MONITOR` -command belong to the `normal` class. +You can use one of the optional subcommands to filter the list. The `TYPE type` subcommand filters the list by clients' type, where *type* is one of `normal`, `master`, `replica`, and `pubsub`. Note that clients blocked by the `MONITOR` command belong to the `normal` class. -The `ID` filter only returns entries for clients with IDs matching the -`client-id` arguments. +The `ID` filter only returns entries for clients with IDs matching the `client-id` arguments. 
@return @bulk-string-reply: a unique string, formatted as follows: -- One client connection per line (separated by LF) -- Each line is composed of a succession of `property=value` fields separated by - a space character. +* One client connection per line (separated by LF) +* Each line is composed of a succession of `property=value` fields separated + by a space character. Here is the meaning of the fields: -- `id`: an unique 64-bit client ID. -- `name`: the name set by the client with `CLIENT SETNAME` -- `addr`: address/port of the client -- `laddr`: address/port of local address client connected to (bind address) -- `fd`: file descriptor corresponding to the socket -- `age`: total duration of the connection in seconds -- `idle`: idle time of the connection in seconds -- `flags`: client flags (see below) -- `db`: current database ID -- `sub`: number of channel subscriptions -- `psub`: number of pattern matching subscriptions -- `multi`: number of commands in a MULTI/EXEC context -- `qbuf`: query buffer length (0 means no query pending) -- `qbuf-free`: free space of the query buffer (0 means the buffer is full) -- `obl`: output buffer length -- `oll`: output list length (replies are queued in this list when the buffer is - full) -- `omem`: output buffer memory usage -- `events`: file descriptor events (see below) -- `cmd`: last command played -- `argv-mem`: incomplete arguments for the next command (already extracted from - query buffer) -- `tot-mem`: total memory consumed by this client in its various buffers -- `redir`: client id of current client tracking redirection -- `user`: the authenticated username of the client +* `id`: a unique 64-bit client ID +* `addr`: address/port of the client +* `laddr`: address/port of local address client connected to (bind address) +* `fd`: file descriptor corresponding to the socket +* `name`: the name set by the client with `CLIENT SETNAME` +* `age`: total duration of the connection in seconds +* `idle`: idle time of the 
connection in seconds +* `flags`: client flags (see below) +* `db`: current database ID +* `sub`: number of channel subscriptions +* `psub`: number of pattern matching subscriptions +* `ssub`: number of shard channel subscriptions. Added in Redis 7.0.3 +* `multi`: number of commands in a MULTI/EXEC context +* `qbuf`: query buffer length (0 means no query pending) +* `qbuf-free`: free space of the query buffer (0 means the buffer is full) +* `argv-mem`: incomplete arguments for the next command (already extracted from query buffer) +* `multi-mem`: memory is used up by buffered multi commands. Added in Redis 7.0 +* `obl`: output buffer length +* `oll`: output list length (replies are queued in this list when the buffer is full) +* `omem`: output buffer memory usage +* `tot-mem`: total memory consumed by this client in its various buffers +* `events`: file descriptor events (see below) +* `cmd`: last command played +* `user`: the authenticated username of the client +* `redir`: client id of current client tracking redirection +* `resp`: client RESP protocol version. Added in Redis 7.0 The client flags can be a combination of: @@ -64,7 +61,7 @@ U: the client is connected via a Unix domain socket x: the client is in a MULTI/EXEC context t: the client enabled keys tracking in order to perform client side caching R: the client tracking target client is invalid -B: the client enabled broadcast tracking mode +B: the client enabled broadcast tracking mode ``` The file descriptor events can be: @@ -76,13 +73,7 @@ w: the client socket is writable (event loop) ## Notes -New fields are regularly added for debugging purpose. Some could be removed in -the future. A version safe Redis client using this command should parse the -output accordingly (i.e. handling gracefully missing fields, skipping unknown -fields). - -@history - -- `>= 2.8.12`: Added unique client `id` field. -- `>= 5.0`: Added optional `TYPE` filter. -- `>= 6.2`: Added `laddr` field and the optional `ID` filter. 
+New fields are regularly added for debugging purpose. Some could be removed +in the future. A version safe Redis client using this command should parse +the output accordingly (i.e. handling gracefully missing fields, skipping +unknown fields). diff --git a/iredis/data/commands/client-no-evict.md b/iredis/data/commands/client-no-evict.md new file mode 100644 index 0000000..70070a6 --- /dev/null +++ b/iredis/data/commands/client-no-evict.md @@ -0,0 +1,11 @@ +The `CLIENT NO-EVICT` command sets the [client eviction](/topics/clients#client-eviction) mode for the current connection. + +When turned on and client eviction is configured, the current connection will be excluded from the client eviction process even if we're above the configured client eviction threshold. + +When turned off, the current client will be re-included in the pool of potential clients to be evicted (and evicted if needed). + +See [client eviction](/topics/clients#client-eviction) for more details. + +@return + +@simple-string-reply: `OK`. diff --git a/iredis/data/commands/client-pause.md b/iredis/data/commands/client-pause.md index 3de6beb..6f778da 100644 --- a/iredis/data/commands/client-pause.md +++ b/iredis/data/commands/client-pause.md @@ -1,62 +1,44 @@ -`CLIENT PAUSE` is a connections control command able to suspend all the Redis -clients for the specified amount of time (in milliseconds). +`CLIENT PAUSE` is a connections control command able to suspend all the Redis clients for the specified amount of time (in milliseconds). The command performs the following actions: -- It stops processing all the pending commands from normal and pub/sub clients - for the given mode. However interactions with replicas will continue normally. - Note that clients are formally paused when they try to execute a command, so - no work is taken on the server side for inactive clients. -- However it returns OK to the caller ASAP, so the `CLIENT PAUSE` command - execution is not paused by itself. 
-- When the specified amount of time has elapsed, all the clients are unblocked: - this will trigger the processing of all the commands accumulated in the query - buffer of every client during the pause. +* It stops processing all the pending commands from normal and pub/sub clients for the given mode. However interactions with replicas will continue normally. Note that clients are formally paused when they try to execute a command, so no work is taken on the server side for inactive clients. +* However it returns OK to the caller ASAP, so the `CLIENT PAUSE` command execution is not paused by itself. +* When the specified amount of time has elapsed, all the clients are unblocked: this will trigger the processing of all the commands accumulated in the query buffer of every client during the pause. Client pause currently supports two modes: -- `ALL`: This is the default mode. All client commands are blocked. -- `WRITE`: Clients are only blocked if they attempt to execute a write command. +* `ALL`: This is the default mode. All client commands are blocked. +* `WRITE`: Clients are only blocked if they attempt to execute a write command. For the `WRITE` mode, some commands have special behavior: -- `EVAL`/`EVALSHA`: Will block client for all scripts. -- `PUBLISH`: Will block client. -- `PFCOUNT`: Will block client. -- `WAIT`: Acknowledgements will be delayed, so this command will appear blocked. +* `EVAL`/`EVALSHA`: Will block client for all scripts. +* `PUBLISH`: Will block client. +* `PFCOUNT`: Will block client. +* `WAIT`: Acknowledgments will be delayed, so this command will appear blocked. -This command is useful as it makes able to switch clients from a Redis instance -to another one in a controlled way. For example during an instance upgrade the -system administrator could do the following: +This command is useful as it makes able to switch clients from a Redis instance to another one in a controlled way. 
For example during an instance upgrade the system administrator could do the following: -- Pause the clients using `CLIENT PAUSE` -- Wait a few seconds to make sure the replicas processed the latest replication - stream from the master. -- Turn one of the replicas into a master. -- Reconfigure clients to connect with the new master. +* Pause the clients using `CLIENT PAUSE` +* Wait a few seconds to make sure the replicas processed the latest replication stream from the master. +* Turn one of the replicas into a master. +* Reconfigure clients to connect with the new master. -Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode -will stop all replication traffic, can be aborted with the `CLIENT UNPAUSE` -command, and allows reconfiguring the old master without risking accepting -writes after the failover. This is also the mode used during cluster failover. +Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode will stop all replication traffic, can be +aborted with the `CLIENT UNPAUSE` command, and allows reconfiguring the old master without risking accepting writes after the +failover. This is also the mode used during cluster failover. -For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC -block together with the `INFO replication` command in order to get the current -master offset at the time the clients are blocked. This way it is possible to -wait for a specific offset in the replica side in order to make sure all the -replication stream was processed. +For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the `INFO replication` command in order to get the current master offset at the time the clients are blocked. This way it is possible to wait for a specific offset in the replica side in order to make sure all the replication stream was processed. 
Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or expired during the time clients are paused. This way the dataset is guaranteed -to be static not just from the point of view of clients not being able to write, -but also from the point of view of internal operations. +to be static not just from the point of view of clients not being able to write, but also from the point of view of internal operations. @return -@simple-string-reply: The command returns OK or an error if the timeout is -invalid. +@simple-string-reply: The command returns OK or an error if the timeout is invalid. -@history +## Behavior change history -- `>= 3.2.10`: Client pause prevents client pause and key eviction as well. -- `>= 6.2`: CLIENT PAUSE WRITE mode added along with the `mode` option. +* `>= 3.2.0`: Client pause prevents client pause and key eviction as well.
\ No newline at end of file diff --git a/iredis/data/commands/client-reply.md b/iredis/data/commands/client-reply.md index fe8ed94..f2c3ed8 100644 --- a/iredis/data/commands/client-reply.md +++ b/iredis/data/commands/client-reply.md @@ -1,21 +1,13 @@ -Sometimes it can be useful for clients to completely disable replies from the -Redis server. For example when the client sends fire and forget commands or -performs a mass loading of data, or in caching contexts where new data is -streamed constantly. In such contexts to use server time and bandwidth in order -to send back replies to clients, which are going to be ignored, is considered -wasteful. +Sometimes it can be useful for clients to completely disable replies from the Redis server. For example when the client sends fire and forget commands or performs a mass loading of data, or in caching contexts where new data is streamed constantly. In such contexts to use server time and bandwidth in order to send back replies to clients, which are going to be ignored, is considered wasteful. -The `CLIENT REPLY` command controls whether the server will reply the client's -commands. The following modes are available: +The `CLIENT REPLY` command controls whether the server will reply the client's commands. The following modes are available: -- `ON`. This is the default mode in which the server returns a reply to every - command. -- `OFF`. In this mode the server will not reply to client commands. -- `SKIP`. This mode skips the reply of command immediately after it. +* `ON`. This is the default mode in which the server returns a reply to every command. +* `OFF`. In this mode the server will not reply to client commands. +* `SKIP`. This mode skips the reply of command immediately after it. @return -When called with either `OFF` or `SKIP` subcommands, no reply is made. When -called with `ON`: +When called with either `OFF` or `SKIP` subcommands, no reply is made. When called with `ON`: @simple-string-reply: `OK`. 
diff --git a/iredis/data/commands/client-setname.md b/iredis/data/commands/client-setname.md index 0155a42..c1e70af 100644 --- a/iredis/data/commands/client-setname.md +++ b/iredis/data/commands/client-setname.md @@ -1,27 +1,18 @@ The `CLIENT SETNAME` command assigns a name to the current connection. -The assigned name is displayed in the output of `CLIENT LIST` so that it is -possible to identify the client that performed a given connection. +The assigned name is displayed in the output of `CLIENT LIST` so that it is possible to identify the client that performed a given connection. -For instance when Redis is used in order to implement a queue, producers and -consumers of messages may want to set the name of the connection according to -their role. +For instance when Redis is used in order to implement a queue, producers and consumers of messages may want to set the name of the connection according to their role. -There is no limit to the length of the name that can be assigned if not the -usual limits of the Redis string type (512 MB). However it is not possible to -use spaces in the connection name as this would violate the format of the -`CLIENT LIST` reply. +There is no limit to the length of the name that can be assigned if not the usual limits of the Redis string type (512 MB). However it is not possible to use spaces in the connection name as this would violate the format of the `CLIENT LIST` reply. -It is possible to entirely remove the connection name setting it to the empty -string, that is not a valid connection name since it serves to this specific -purpose. +It is possible to entirely remove the connection name setting it to the empty string, that is not a valid connection name since it serves to this specific purpose. The connection name can be inspected using `CLIENT GETNAME`. Every new connection starts without an assigned name. -Tip: setting names to connections is a good way to debug connection leaks due to -bugs in the application using Redis. 
+Tip: setting names to connections is a good way to debug connection leaks due to bugs in the application using Redis. @return diff --git a/iredis/data/commands/client-tracking.md b/iredis/data/commands/client-tracking.md index 4dc379b..e77f7d9 100644 --- a/iredis/data/commands/client-tracking.md +++ b/iredis/data/commands/client-tracking.md @@ -1,56 +1,33 @@ -This command enables the tracking feature of the Redis server, that is used for -[server assisted client side caching](/topics/client-side-caching). - -When tracking is enabled Redis remembers the keys that the connection requested, -in order to send later invalidation messages when such keys are modified. -Invalidation messages are sent in the same connection (only available when the -RESP3 protocol is used) or redirected in a different connection (available also -with RESP2 and Pub/Sub). A special _broadcasting_ mode is available where -clients participating in this protocol receive every notification just -subscribing to given key prefixes, regardless of the keys that they requested. -Given the complexity of the argument please refer to -[the main client side caching documentation](/topics/client-side-caching) for -the details. This manual page is only a reference for the options of this -subcommand. +This command enables the tracking feature of the Redis server, that is used +for [server assisted client side caching](/topics/client-side-caching). + +When tracking is enabled Redis remembers the keys that the connection +requested, in order to send later invalidation messages when such keys are +modified. Invalidation messages are sent in the same connection (only available +when the RESP3 protocol is used) or redirected in a different connection +(available also with RESP2 and Pub/Sub). A special *broadcasting* mode is +available where clients participating in this protocol receive every +notification just subscribing to given key prefixes, regardless of the +keys that they requested. 
Given the complexity of the argument please +refer to [the main client side caching documentation](/topics/client-side-caching) for the details. This manual page is only a reference for the options of this subcommand. In order to enable tracking, use: CLIENT TRACKING on ... options ... The feature will remain active in the current connection for all its life, -unless tracking is turned on with `CLIENT TRACKING off` at some point. - -The following are the list of options that modify the behavior of the command -when enabling tracking: - -- `REDIRECT <id>`: send redirection messages to the connection with the - specified ID. The connection must exist, you can get the ID of such connection - using `CLIENT ID`. If the connection we are redirecting to is terminated, when - in RESP3 mode the connection with tracking enabled will receive - `tracking-redir-broken` push messages in order to signal the condition. -- `BCAST`: enable tracking in broadcasting mode. In this mode invalidation - messages are reported for all the prefixes specified, regardless of the keys - requested by the connection. Instead when the broadcasting mode is not - enabled, Redis will track which keys are fetched using read-only commands, and - will report invalidation messages only for such keys. -- `PREFIX <prefix>`: for broadcasting, register a given key prefix, so that - notifications will be provided only for keys starting with this string. This - option can be given multiple times to register multiple prefixes. If - broadcasting is enabled without this option, Redis will send notifications for - every key. You can't delete a single prefix, but you can delete all prefixes - by disabling and re-enabling tracking. Using this option adds the additional - time complexity of O(N^2), where N is the total number of prefixes tracked. -- `OPTIN`: when broadcasting is NOT active, normally don't track keys in read - only commands, unless they are called immediately after a `CLIENT CACHING yes` - command. 
-- `OPTOUT`: when broadcasting is NOT active, normally track keys in read only - commands, unless they are called immediately after a `CLIENT CACHING no` - command. -- `NOLOOP`: don't send notifications about keys modified by this connection - itself. +unless tracking is turned off with `CLIENT TRACKING off` at some point. + +The following are the list of options that modify the behavior of the +command when enabling tracking: + +* `REDIRECT <id>`: send invalidation messages to the connection with the specified ID. The connection must exist. You can get the ID of a connection using `CLIENT ID`. If the connection we are redirecting to is terminated, when in RESP3 mode the connection with tracking enabled will receive `tracking-redir-broken` push messages in order to signal the condition. +* `BCAST`: enable tracking in broadcasting mode. In this mode invalidation messages are reported for all the prefixes specified, regardless of the keys requested by the connection. Instead when the broadcasting mode is not enabled, Redis will track which keys are fetched using read-only commands, and will report invalidation messages only for such keys. +* `PREFIX <prefix>`: for broadcasting, register a given key prefix, so that notifications will be provided only for keys starting with this string. This option can be given multiple times to register multiple prefixes. If broadcasting is enabled without this option, Redis will send notifications for every key. You can't delete a single prefix, but you can delete all prefixes by disabling and re-enabling tracking. Using this option adds the additional time complexity of O(N^2), where N is the total number of prefixes tracked. +* `OPTIN`: when broadcasting is NOT active, normally don't track keys in read only commands, unless they are called immediately after a `CLIENT CACHING yes` command. 
+* `OPTOUT`: when broadcasting is NOT active, normally track keys in read only commands, unless they are called immediately after a `CLIENT CACHING no` command. +* `NOLOOP`: don't send notifications about keys modified by this connection itself. @return -@simple-string-reply: `OK` if the connection was successfully put in tracking -mode or if the tracking mode was successfully disabled. Otherwise an error is -returned. +@simple-string-reply: `OK` if the connection was successfully put in tracking mode or if the tracking mode was successfully disabled. Otherwise an error is returned. diff --git a/iredis/data/commands/client-trackinginfo.md b/iredis/data/commands/client-trackinginfo.md index 55dc577..82de43e 100644 --- a/iredis/data/commands/client-trackinginfo.md +++ b/iredis/data/commands/client-trackinginfo.md @@ -1,25 +1,18 @@ -The command returns information about the current client connection's use of the -[server assisted client side caching](/topics/client-side-caching) feature. +The command returns information about the current client connection's use of the [server assisted client side caching](/topics/client-side-caching) feature. @return -@array-reply: a list of tracking information sections and their respective -values, specifically: +@array-reply: a list of tracking information sections and their respective values, specifically: -- **flags**: A list of tracking flags used by the connection. The flags and - their meanings are as follows: - - `off`: The connection isn't using server assisted client side caching. - - `on`: Server assisted client side caching is enabled for the connection. - - `bcast`: The client uses broadcasting mode. - - `optin`: The client does not cache keys by default. - - `optout`: The client caches keys by default. - - `caching-yes`: The next command will cache keys (exists only together with - `optin`). - - `caching-no`: The next command won't cache keys (exists only together with - `optout`). 
- - `noloop`: The client isn't notified about keys modified by itself. - - `broken_redirect`: The client ID used for redirection isn't valid anymore. -- **redirect**: The client ID used for notifications redirection, or -1 when - none. -- **prefixes**: A list of key prefixes for which notifications are sent to the - client. +* **flags**: A list of tracking flags used by the connection. The flags and their meanings are as follows: + * `off`: The connection isn't using server assisted client side caching. + * `on`: Server assisted client side caching is enabled for the connection. + * `bcast`: The client uses broadcasting mode. + * `optin`: The client does not cache keys by default. + * `optout`: The client caches keys by default. + * `caching-yes`: The next command will cache keys (exists only together with `optin`). + * `caching-no`: The next command won't cache keys (exists only together with `optout`). + * `noloop`: The client isn't notified about keys modified by itself. + * `broken_redirect`: The client ID used for redirection isn't valid anymore. +* **redirect**: The client ID used for notifications redirection, or -1 when none. +* **prefixes**: A list of key prefixes for which notifications are sent to the client. diff --git a/iredis/data/commands/client-unblock.md b/iredis/data/commands/client-unblock.md index dae5fb3..11dff98 100644 --- a/iredis/data/commands/client-unblock.md +++ b/iredis/data/commands/client-unblock.md @@ -1,39 +1,34 @@ -This command can unblock, from a different connection, a client blocked in a -blocking operation, such as for instance `BRPOP` or `XREAD` or `WAIT`. +This command can unblock, from a different connection, a client blocked in a blocking operation, such as for instance `BRPOP` or `XREAD` or `WAIT`. 
-By default the client is unblocked as if the timeout of the command was reached, -however if an additional (and optional) argument is passed, it is possible to -specify the unblocking behavior, that can be **TIMEOUT** (the default) or -**ERROR**. If **ERROR** is specified, the behavior is to unblock the client -returning as error the fact that the client was force-unblocked. Specifically -the client will receive the following error: +By default the client is unblocked as if the timeout of the command was +reached, however if an additional (and optional) argument is passed, it is possible to specify the unblocking behavior, that can be **TIMEOUT** (the default) or **ERROR**. If **ERROR** is specified, the behavior is to unblock the client returning as error the fact that the client was force-unblocked. Specifically the client will receive the following error: -UNBLOCKED client unblocked via CLIENT UNBLOCK -Note: of course as usually it is not guaranteed that the error text remains the -same, however the error code will remain `-UNBLOCKED`. +Note: of course as usually it is not guaranteed that the error text remains +the same, however the error code will remain `-UNBLOCKED`. -This command is useful especially when we are monitoring many keys with a -limited number of connections. For instance we may want to monitor multiple +This command is useful especially when we are monitoring many keys with +a limited number of connections. For instance we may want to monitor multiple streams with `XREAD` without using more than N connections. However at some -point the consumer process is informed that there is one more stream key to -monitor. In order to avoid using more connections, the best behavior would be to -stop the blocking command from one of the connections in the pool, add the new -key, and issue the blocking command again. +point the consumer process is informed that there is one more stream key +to monitor. 
In order to avoid using more connections, the best behavior would +be to stop the blocking command from one of the connections in the pool, add +the new key, and issue the blocking command again. -To obtain this behavior the following pattern is used. The process uses an -additional _control connection_ in order to send the `CLIENT UNBLOCK` command if -needed. In the meantime, before running the blocking operation on the other -connections, the process runs `CLIENT ID` in order to get the ID associated with -that connection. When a new key should be added, or when a key should no longer -be monitored, the relevant connection blocking command is aborted by sending -`CLIENT UNBLOCK` in the control connection. The blocking command will return and -can be finally reissued. +To obtain this behavior the following pattern is used. The process uses +an additional *control connection* in order to send the `CLIENT UNBLOCK` command +if needed. In the meantime, before running the blocking operation on the other +connections, the process runs `CLIENT ID` in order to get the ID associated +with that connection. When a new key should be added, or when a key should +no longer be monitored, the relevant connection blocking command is aborted +by sending `CLIENT UNBLOCK` in the control connection. The blocking command +will return and can be finally reissued. -This example shows the application in the context of Redis streams, however the -pattern is a general one and can be applied to other cases. +This example shows the application in the context of Redis streams, however +the pattern is a general one and can be applied to other cases. -@example +@examples ``` Connection A (blocking connection): @@ -59,5 +54,5 @@ NULL @integer-reply, specifically: -- `1` if the client was unblocked successfully. -- `0` if the client wasn't unblocked. +* `1` if the client was unblocked successfully. +* `0` if the client wasn't unblocked. 
diff --git a/iredis/data/commands/client-unpause.md b/iredis/data/commands/client-unpause.md index 7d8dbb7..c438485 100644 --- a/iredis/data/commands/client-unpause.md +++ b/iredis/data/commands/client-unpause.md @@ -1,5 +1,4 @@ -`CLIENT UNPAUSE` is used to resume command processing for all clients that were -paused by `CLIENT PAUSE`. +`CLIENT UNPAUSE` is used to resume command processing for all clients that were paused by `CLIENT PAUSE`. @return diff --git a/iredis/data/commands/client.md b/iredis/data/commands/client.md new file mode 100644 index 0000000..fdfd0e8 --- /dev/null +++ b/iredis/data/commands/client.md @@ -0,0 +1,3 @@ +This is a container command for client connection commands. + +To see the list of available commands you can call `CLIENT HELP`.
\ No newline at end of file diff --git a/iredis/data/commands/cluster-addslots.md b/iredis/data/commands/cluster-addslots.md index b7b777a..0604066 100644 --- a/iredis/data/commands/cluster-addslots.md +++ b/iredis/data/commands/cluster-addslots.md @@ -1,55 +1,51 @@ This command is useful in order to modify a node's view of the cluster -configuration. Specifically it assigns a set of hash slots to the node receiving -the command. If the command is successful, the node will map the specified hash -slots to itself, and will start broadcasting the new configuration. +configuration. Specifically it assigns a set of hash slots to the node +receiving the command. If the command is successful, the node will map +the specified hash slots to itself, and will start broadcasting the new +configuration. However note that: -1. The command only works if all the specified slots are, from the point of view - of the node receiving the command, currently not assigned. A node will refuse - to take ownership for slots that already belong to some other node (including - itself). +1. The command only works if all the specified slots are, from the point of view of the node receiving the command, currently not assigned. A node will refuse to take ownership for slots that already belong to some other node (including itself). 2. The command fails if the same slot is specified multiple times. -3. As a side effect of the command execution, if a slot among the ones specified - as argument is set as `importing`, this state gets cleared once the node - assigns the (previously unbound) slot to itself. +3. As a side effect of the command execution, if a slot among the ones specified as argument is set as `importing`, this state gets cleared once the node assigns the (previously unbound) slot to itself. 
## Example -For example the following command assigns slots 1 2 3 to the node receiving the -command: +For example the following command assigns slots 1 2 3 to the node receiving +the command: > CLUSTER ADDSLOTS 1 2 3 OK -However trying to execute it again results into an error since the slots are -already assigned: +However trying to execute it again results into an error since the slots +are already assigned: > CLUSTER ADDSLOTS 1 2 3 ERR Slot 1 is already busy ## Usage in Redis Cluster -This command only works in cluster mode and is useful in the following Redis -Cluster operations: +This command only works in cluster mode and is useful in the following +Redis Cluster operations: -1. To create a new cluster ADDSLOTS is used in order to initially setup master - nodes splitting the available hash slots among them. +1. To create a new cluster ADDSLOTS is used in order to initially setup master nodes splitting the available hash slots among them. 2. In order to fix a broken cluster where certain slots are unassigned. ## Information about slots propagation and warnings Note that once a node assigns a set of slots to itself, it will start -propagating this information in heartbeat packet headers. However the other -nodes will accept the information only if they have the slot as not already -bound with another node, or if the configuration epoch of the node advertising -the new hash slot, is greater than the node currently listed in the table. +propagating this information in heartbeat packet headers. However the +other nodes will accept the information only if they have the slot as +not already bound with another node, or if the configuration epoch of the +node advertising the new hash slot, is greater than the node currently listed +in the table. 
This means that this command should be used with care only by applications -orchestrating Redis Cluster, like `redis-cli`, and the command if used out of -the right context can leave the cluster in a wrong state or cause data loss. +orchestrating Redis Cluster, like `redis-cli`, and the command if used +out of the right context can leave the cluster in a wrong state or cause +data loss. @return -@simple-string-reply: `OK` if the command was successful. Otherwise an error is -returned. +@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. diff --git a/iredis/data/commands/cluster-addslotsrange.md b/iredis/data/commands/cluster-addslotsrange.md new file mode 100644 index 0000000..a00e23f --- /dev/null +++ b/iredis/data/commands/cluster-addslotsrange.md @@ -0,0 +1,27 @@ +The `CLUSTER ADDSLOTSRANGE` is similar to the `CLUSTER ADDSLOTS` command in that they both assign hash slots to nodes. + +The difference between the two commands is that `ADDSLOTS` takes a list of slots to assign to the node, while `ADDSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to assign to the node. + +## Example + +To assign slots 1 2 3 4 5 to the node, the `ADDSLOTS` command is: + + > CLUSTER ADDSLOTS 1 2 3 4 5 + OK + +The same operation can be completed with the following `ADDSLOTSRANGE` command: + + > CLUSTER ADDSLOTSRANGE 1 5 + OK + + +## Usage in Redis Cluster + +This command only works in cluster mode and is useful in the following Redis Cluster operations: + +1. To create a new cluster ADDSLOTSRANGE is used in order to initially setup master nodes splitting the available hash slots among them. +2. In order to fix a broken cluster where certain slots are unassigned. + +@return + +@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. 
diff --git a/iredis/data/commands/cluster-bumpepoch.md b/iredis/data/commands/cluster-bumpepoch.md index 16a94a4..b05694a 100644 --- a/iredis/data/commands/cluster-bumpepoch.md +++ b/iredis/data/commands/cluster-bumpepoch.md @@ -1,15 +1,9 @@ Advances the cluster config epoch. -The `CLUSTER BUMPEPOCH` command triggers an increment to the cluster's config -epoch from the connected node. The epoch will be incremented if the node's -config epoch is zero, or if it is less than the cluster's greatest epoch. +The `CLUSTER BUMPEPOCH` command triggers an increment to the cluster's config epoch from the connected node. The epoch will be incremented if the node's config epoch is zero, or if it is less than the cluster's greatest epoch. -**Note:** config epoch management is performed internally by the cluster, and -relies on obtaining a consensus of nodes. The `CLUSTER BUMPEPOCH` attempts to -increment the config epoch **WITHOUT** getting the consensus, so using it may -violate the "last failover wins" rule. Use it with caution. +**Note:** config epoch management is performed internally by the cluster, and relies on obtaining a consensus of nodes. The `CLUSTER BUMPEPOCH` attempts to increment the config epoch **WITHOUT** getting the consensus, so using it may violate the "last failover wins" rule. Use it with caution. @return -@simple-string-reply: `BUMPED` if the epoch was incremented, or `STILL` if the -node already has the greatest config epoch in the cluster. +@simple-string-reply: `BUMPED` if the epoch was incremented, or `STILL` if the node already has the greatest config epoch in the cluster. diff --git a/iredis/data/commands/cluster-count-failure-reports.md b/iredis/data/commands/cluster-count-failure-reports.md index bb3c937..ac1ef71 100644 --- a/iredis/data/commands/cluster-count-failure-reports.md +++ b/iredis/data/commands/cluster-count-failure-reports.md @@ -1,33 +1,21 @@ -The command returns the number of _failure reports_ for the specified node. 
-Failure reports are the way Redis Cluster uses in order to promote a `PFAIL` -state, that means a node is not reachable, to a `FAIL` state, that means that -the majority of masters in the cluster agreed within a window of time that the -node is not reachable. +The command returns the number of *failure reports* for the specified node. +Failure reports are the way Redis Cluster uses in order to promote a +`PFAIL` state, that means a node is not reachable, to a `FAIL` state, +that means that the majority of masters in the cluster agreed within +a window of time that the node is not reachable. A few more details: -- A node flags another node with `PFAIL` when the node is not reachable for a - time greater than the configured _node timeout_, which is a fundamental - configuration parameter of a Redis Cluster. -- Nodes in `PFAIL` state are provided in gossip sections of heartbeat packets. -- Every time a node processes gossip packets from other nodes, it creates (and - refreshes the TTL if needed) **failure reports**, remembering that a given - node said another given node is in `PFAIL` condition. -- Each failure report has a time to live of two times the _node timeout_ time. -- If at a given time a node has another node flagged with `PFAIL`, and at the - same time collected the majority of other master nodes _failure reports_ about - this node (including itself if it is a master), then it elevates the failure - state of the node from `PFAIL` to `FAIL`, and broadcasts a message forcing all - the nodes that can be reached to flag the node as `FAIL`. +* A node flags another node with `PFAIL` when the node is not reachable for a time greater than the configured *node timeout*, which is a fundamental configuration parameter of a Redis Cluster. +* Nodes in `PFAIL` state are provided in gossip sections of heartbeat packets. 
+* Every time a node processes gossip packets from other nodes, it creates (and refreshes the TTL if needed) **failure reports**, remembering that a given node said another given node is in `PFAIL` condition. +* Each failure report has a time to live of two times the *node timeout* time. +* If at a given time a node has another node flagged with `PFAIL`, and at the same time collected the majority of other master nodes *failure reports* about this node (including itself if it is a master), then it elevates the failure state of the node from `PFAIL` to `FAIL`, and broadcasts a message forcing all the nodes that can be reached to flag the node as `FAIL`. -This command returns the number of failure reports for the current node which -are currently not expired (so received within two times the _node timeout_ -time). The count does not include what the node we are asking this count -believes about the node ID we pass as argument, the count _only_ includes the -failure reports the node received from other nodes. +This command returns the number of failure reports for the current node which are currently not expired (so received within two times the *node timeout* time). The count does not include what the node we are asking this count believes about the node ID we pass as argument, the count *only* includes the failure reports the node received from other nodes. -This command is mainly useful for debugging, when the failure detector of Redis -Cluster is not operating as we believe it should. +This command is mainly useful for debugging, when the failure detector of +Redis Cluster is not operating as we believe it should. @return diff --git a/iredis/data/commands/cluster-countkeysinslot.md b/iredis/data/commands/cluster-countkeysinslot.md index 92ec7c6..0bffec8 100644 --- a/iredis/data/commands/cluster-countkeysinslot.md +++ b/iredis/data/commands/cluster-countkeysinslot.md @@ -1,6 +1,7 @@ -Returns the number of keys in the specified Redis Cluster hash slot. 
The command -only queries the local data set, so contacting a node that is not serving the -specified hash slot will always result in a count of zero being returned. +Returns the number of keys in the specified Redis Cluster hash slot. The +command only queries the local data set, so contacting a node +that is not serving the specified hash slot will always result in a count of +zero being returned. ``` > CLUSTER COUNTKEYSINSLOT 7000 @@ -9,5 +10,4 @@ specified hash slot will always result in a count of zero being returned. @return -@integer-reply: The number of keys in the specified hash slot, or an error if -the hash slot is invalid. +@integer-reply: The number of keys in the specified hash slot, or an error if the hash slot is invalid. diff --git a/iredis/data/commands/cluster-delslots.md b/iredis/data/commands/cluster-delslots.md index bd41bd7..77204e1 100644 --- a/iredis/data/commands/cluster-delslots.md +++ b/iredis/data/commands/cluster-delslots.md @@ -1,47 +1,48 @@ -In Redis Cluster, each node keeps track of which master is serving a particular -hash slot. - -The `DELSLOTS` command asks a particular Redis Cluster node to forget which -master is serving the hash slots specified as arguments. - -In the context of a node that has received a `DELSLOTS` command and has -consequently removed the associations for the passed hash slots, we say those -hash slots are _unbound_. Note that the existence of unbound hash slots occurs -naturally when a node has not been configured to handle them (something that can -be done with the `ADDSLOTS` command) and if it has not received any information -about who owns those hash slots (something that it can learn from heartbeat or -update messages). - -If a node with unbound hash slots receives a heartbeat packet from another node -that claims to be the owner of some of those hash slots, the association is -established instantly. 
Moreover, if a heartbeat or update message is received -with a configuration epoch greater than the node's own, the association is -re-established. +In Redis Cluster, each node keeps track of which master is serving +a particular hash slot. + +The `CLUSTER DELSLOTS` command asks a particular Redis Cluster node to +forget which master is serving the hash slots specified as arguments. + +In the context of a node that has received a `CLUSTER DELSLOTS` command and +has consequently removed the associations for the passed hash slots, +we say those hash slots are *unbound*. Note that the existence of +unbound hash slots occurs naturally when a node has not been +configured to handle them (something that can be done with the +`CLUSTER ADDSLOTS` command) and if it has not received any information about +who owns those hash slots (something that it can learn from heartbeat +or update messages). + +If a node with unbound hash slots receives a heartbeat packet from +another node that claims to be the owner of some of those hash +slots, the association is established instantly. Moreover, if a +heartbeat or update message is received with a configuration epoch +greater than the node's own, the association is re-established. However, note that: -1. The command only works if all the specified slots are already associated with - some node. +1. The command only works if all the specified slots are already +associated with some node. 2. The command fails if the same slot is specified multiple times. -3. As a side effect of the command execution, the node may go into _down_ state - because not all hash slots are covered. +3. As a side effect of the command execution, the node may go into +*down* state because not all hash slots are covered. 
## Example -The following command removes the association for slots 5000 and 5001 from the -node receiving the command: +The following command removes the association for slots 5000 and +5001 from the node receiving the command: > CLUSTER DELSLOTS 5000 5001 OK ## Usage in Redis Cluster -This command only works in cluster mode and may be useful for debugging and in -order to manually orchestrate a cluster configuration when a new cluster is -created. It is currently not used by `redis-cli`, and mainly exists for API -completeness. +This command only works in cluster mode and may be useful for +debugging and in order to manually orchestrate a cluster configuration +when a new cluster is created. It is currently not used by `redis-cli`, +and mainly exists for API completeness. @return -@simple-string-reply: `OK` if the command was successful. Otherwise an error is -returned. +@simple-string-reply: `OK` if the command was successful. Otherwise +an error is returned. diff --git a/iredis/data/commands/cluster-delslotsrange.md b/iredis/data/commands/cluster-delslotsrange.md new file mode 100644 index 0000000..e4c1f2b --- /dev/null +++ b/iredis/data/commands/cluster-delslotsrange.md @@ -0,0 +1,32 @@ +The `CLUSTER DELSLOTSRANGE` command is similar to the `CLUSTER DELSLOTS` command in that they both remove hash slots from the node. +The difference is that `CLUSTER DELSLOTS` takes a list of hash slots to remove from the node, while `CLUSTER DELSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to remove from the node. + +## Example + +To remove slots 1 2 3 4 5 from the node, the `CLUSTER DELSLOTS` command is: + + > CLUSTER DELSLOTS 1 2 3 4 5 + OK + +The same operation can be completed with the following `CLUSTER DELSLOTSRANGE` command: + + > CLUSTER DELSLOTSRANGE 1 5 + OK + +However, note that: + +1. The command only works if all the specified slots are already associated with the node. +2. 
The command fails if the same slot is specified multiple times. +3. As a side effect of the command execution, the node may go into *down* state because not all hash slots are covered. + +## Usage in Redis Cluster + +This command only works in cluster mode and may be useful for +debugging and in order to manually orchestrate a cluster configuration +when a new cluster is created. It is currently not used by `redis-cli`, +and mainly exists for API completeness. + +@return + +@simple-string-reply: `OK` if the command was successful. Otherwise +an error is returned. diff --git a/iredis/data/commands/cluster-failover.md b/iredis/data/commands/cluster-failover.md index c811c04..911eaea 100644 --- a/iredis/data/commands/cluster-failover.md +++ b/iredis/data/commands/cluster-failover.md @@ -1,81 +1,67 @@ -This command, that can only be sent to a Redis Cluster replica node, forces the -replica to start a manual failover of its master instance. +This command, that can only be sent to a Redis Cluster replica node, forces +the replica to start a manual failover of its master instance. A manual failover is a special kind of failover that is usually executed when -there are no actual failures, but we wish to swap the current master with one of -its replicas (which is the node we send the command to), in a safe way, without -any window for data loss. It works in the following way: +there are no actual failures, but we wish to swap the current master with one +of its replicas (which is the node we send the command to), in a safe way, +without any window for data loss. It works in the following way: 1. The replica tells the master to stop processing queries from clients. -2. The master replies to the replica with the current _replication offset_. -3. The replica waits for the replication offset to match on its side, to make - sure it processed all the data from the master before it continues. -4. 
The replica starts a failover, obtains a new configuration epoch from the - majority of the masters, and broadcasts the new configuration. -5. The old master receives the configuration update: unblocks its clients and - starts replying with redirection messages so that they'll continue the chat - with the new master. - -This way clients are moved away from the old master to the new master atomically -and only when the replica that is turning into the new master has processed all -of the replication stream from the old master. +2. The master replies to the replica with the current *replication offset*. +3. The replica waits for the replication offset to match on its side, to make sure it processed all the data from the master before it continues. +4. The replica starts a failover, obtains a new configuration epoch from the majority of the masters, and broadcasts the new configuration. +5. The old master receives the configuration update: unblocks its clients and starts replying with redirection messages so that they'll continue the chat with the new master. + +This way clients are moved away from the old master to the new master +atomically and only when the replica that is turning into the new master +has processed all of the replication stream from the old master. ## FORCE option: manual failover when the master is down The command behavior can be modified by two options: **FORCE** and **TAKEOVER**. If the **FORCE** option is given, the replica does not perform any handshake -with the master, that may be not reachable, but instead just starts a failover -ASAP starting from point 4. This is useful when we want to start a manual -failover while the master is no longer reachable. +with the master, that may be not reachable, but instead just starts a +failover ASAP starting from point 4. This is useful when we want to start +a manual failover while the master is no longer reachable. 
-However using **FORCE** we still need the majority of masters to be available in -order to authorize the failover and generate a new configuration epoch for the -replica that is going to become master. +However using **FORCE** we still need the majority of masters to be available +in order to authorize the failover and generate a new configuration epoch +for the replica that is going to become master. ## TAKEOVER option: manual failover without cluster consensus There are situations where this is not enough, and we want a replica to failover -without any agreement with the rest of the cluster. A real world use case for -this is to mass promote replicas in a different data center to masters in order -to perform a data center switch, while all the masters are down or partitioned -away. +without any agreement with the rest of the cluster. A real world use case +for this is to mass promote replicas in a different data center to masters +in order to perform a data center switch, while all the masters are down +or partitioned away. -The **TAKEOVER** option implies everything **FORCE** implies, but also does not -uses any cluster authorization in order to failover. A replica receiving +The **TAKEOVER** option implies everything **FORCE** implies, but also does +not uses any cluster authorization in order to failover. A replica receiving `CLUSTER FAILOVER TAKEOVER` will instead: -1. Generate a new `configEpoch` unilaterally, just taking the current greatest - epoch available and incrementing it if its local configuration epoch is not - already the greatest. -2. Assign itself all the hash slots of its master, and propagate the new - configuration to every node which is reachable ASAP, and eventually to every - other node. - -Note that **TAKEOVER violates the last-failover-wins principle** of Redis -Cluster, since the configuration epoch generated by the replica violates the -normal generation of configuration epochs in several ways: - -1. 
There is no guarantee that it is actually the higher configuration epoch, - since, for example, we can use the **TAKEOVER** option within a minority, nor - any message exchange is performed to generate the new configuration epoch. -2. If we generate a configuration epoch which happens to collide with another - instance, eventually our configuration epoch, or the one of another instance - with our same epoch, will be moved away using the _configuration epoch - collision resolution algorithm_. +1. Generate a new `configEpoch` unilaterally, just taking the current greatest epoch available and incrementing it if its local configuration epoch is not already the greatest. +2. Assign itself all the hash slots of its master, and propagate the new configuration to every node which is reachable ASAP, and eventually to every other node. + +Note that **TAKEOVER violates the last-failover-wins principle** of Redis Cluster, since the configuration epoch generated by the replica violates the normal generation of configuration epochs in several ways: + +1. There is no guarantee that it is actually the higher configuration epoch, since, for example, we can use the **TAKEOVER** option within a minority, nor any message exchange is performed to generate the new configuration epoch. +2. If we generate a configuration epoch which happens to collide with another instance, eventually our configuration epoch, or the one of another instance with our same epoch, will be moved away using the *configuration epoch collision resolution algorithm*. Because of this the **TAKEOVER** option should be used with care. 
## Implementation details and notes -`CLUSTER FAILOVER`, unless the **TAKEOVER** option is specified, does not -execute a failover synchronously, it only _schedules_ a manual failover, -bypassing the failure detection stage, so to check if the failover actually -happened, `CLUSTER NODES` or other means should be used in order to verify that -the state of the cluster changes after some time the command was sent. +* `CLUSTER FAILOVER`, unless the **TAKEOVER** option is specified, does not execute a failover synchronously. + It only *schedules* a manual failover, bypassing the failure detection stage. +* An `OK` reply is no guarantee that the failover will succeed. +* A replica can only be promoted to a master if it is known as a replica by a majority of the masters in the cluster. + If the replica is a new node that has just been added to the cluster (for example after upgrading it), it may not yet be known to all the masters in the cluster. + To check that the masters are aware of a new replica, you can send `CLUSTER NODES` or `CLUSTER REPLICAS` to each of the master nodes and check that it appears as a replica, before sending `CLUSTER FAILOVER` to the replica. +* To check that the failover has actually happened you can use `ROLE`, `INFO REPLICATION` (which indicates "role:master" after successful failover), or `CLUSTER NODES` to verify that the state of the cluster has changed sometime after the command was sent. +* To check if the failover has failed, check the replica's log for "Manual failover timed out", which is logged if the replica has given up after a few seconds. @return -@simple-string-reply: `OK` if the command was accepted and a manual failover is -going to be attempted. An error if the operation cannot be executed, for example -if we are talking with a node which is already a master. +@simple-string-reply: `OK` if the command was accepted and a manual failover is going to be attempted. 
An error if the operation cannot be executed, for example if we are talking with a node which is already a master. diff --git a/iredis/data/commands/cluster-flushslots.md b/iredis/data/commands/cluster-flushslots.md index 8974a1d..b0b3fdf 100644 --- a/iredis/data/commands/cluster-flushslots.md +++ b/iredis/data/commands/cluster-flushslots.md @@ -1,7 +1,6 @@ Deletes all slots from a node. -The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected -node. It can only be called when the database is empty. +The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected node. It can only be called when the database is empty. @return diff --git a/iredis/data/commands/cluster-forget.md b/iredis/data/commands/cluster-forget.md index 7926091..6bff506 100644 --- a/iredis/data/commands/cluster-forget.md +++ b/iredis/data/commands/cluster-forget.md @@ -1,45 +1,45 @@ -The command is used in order to remove a node, specified via its node ID, from -the set of _known nodes_ of the Redis Cluster node receiving the command. In -other words the specified node is removed from the _nodes table_ of the node -receiving the command. +The command is used in order to remove a node, specified via its node ID, +from the set of *known nodes* of the Redis Cluster node receiving the command. +In other words the specified node is removed from the *nodes table* of the +node receiving the command. Because when a given node is part of the cluster, all the other nodes participating in the cluster knows about it, in order for a node to be -completely removed from a cluster, the `CLUSTER FORGET` command must be sent to -all the remaining nodes, regardless of the fact they are masters or replicas. +completely removed from a cluster, the `CLUSTER FORGET` command must be +sent to all the remaining nodes, regardless of the fact they are masters +or replicas. 
-However the command cannot simply drop the node from the internal node table of -the node receiving the command, it also implements a ban-list, not allowing the -same node to be added again as a side effect of processing the _gossip section_ -of the heartbeat packets received from other nodes. +However the command cannot simply drop the node from the internal node +table of the node receiving the command, it also implements a ban-list, not +allowing the same node to be added again as a side effect of processing the +*gossip section* of the heartbeat packets received from other nodes. ## Details on why the ban-list is needed -In the following example we'll show why the command must not just remove a given -node from the nodes table, but also prevent it for being re-inserted again for -some time. +In the following example we'll show why the command must not just remove +a given node from the nodes table, but also prevent it for being re-inserted +again for some time. -Let's assume we have four nodes, A, B, C and D. In order to end with just a -three nodes cluster A, B, C we may follow these steps: +Let's assume we have four nodes, A, B, C and D. In order to +end with just a three nodes cluster A, B, C we may follow these steps: 1. Reshard all the hash slots from D to nodes A, B, C. 2. D is now empty, but still listed in the nodes table of A, B and C. 3. We contact A, and send `CLUSTER FORGET D`. 4. B sends node A a heartbeat packet, where node D is listed. -5. A does no longer known node D (see step 3), so it starts an handshake with D. +5. A does no longer known node D (see step 3), so it starts a handshake with D. 6. D ends re-added in the nodes table of A. As you can see in this way removing a node is fragile, we need to send -`CLUSTER FORGET` commands to all the nodes ASAP hoping there are no gossip -sections processing in the meantime. Because of this problem the command -implements a ban-list with an expire time for each entry. 
+`CLUSTER FORGET` commands to all the nodes ASAP hoping there are no +gossip sections processing in the meantime. Because of this problem the +command implements a ban-list with an expire time for each entry. So what the command really does is: 1. The specified node gets removed from the nodes table. 2. The node ID of the removed node gets added to the ban-list, for 1 minute. -3. The node will skip all the node IDs listed in the ban-list when processing - gossip sections received in heartbeat packets from other nodes. +3. The node will skip all the node IDs listed in the ban-list when processing gossip sections received in heartbeat packets from other nodes. This way we have a 60 second window to inform all the nodes in the cluster that we want to remove a node. @@ -49,11 +49,9 @@ we want to remove a node. The command does not succeed and returns an error in the following cases: 1. The specified node ID is not found in the nodes table. -2. The node receiving the command is a replica, and the specified node ID - identifies its current master. +2. The node receiving the command is a replica, and the specified node ID identifies its current master. 3. The node ID identifies the same node we are sending the command to. @return -@simple-string-reply: `OK` if the command was executed successfully, otherwise -an error is returned. +@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. diff --git a/iredis/data/commands/cluster-getkeysinslot.md b/iredis/data/commands/cluster-getkeysinslot.md index 9faa62d..120bf44 100644 --- a/iredis/data/commands/cluster-getkeysinslot.md +++ b/iredis/data/commands/cluster-getkeysinslot.md @@ -1,20 +1,20 @@ The command returns an array of keys names stored in the contacted node and -hashing to the specified hash slot. The maximum number of keys to return is -specified via the `count` argument, so that it is possible for the user of this -API to batch-processing keys. 
+hashing to the specified hash slot. The maximum number of keys to return +is specified via the `count` argument, so that it is possible for the user +of this API to batch-processing keys. The main usage of this command is during rehashing of cluster slots from one node to another. The way the rehashing is performed is exposed in the Redis -Cluster specification, or in a more simple to digest form, as an appendix of the -`CLUSTER SETSLOT` command documentation. +Cluster specification, or in a more simple to digest form, as an appendix +of the `CLUSTER SETSLOT` command documentation. ``` > CLUSTER GETKEYSINSLOT 7000 3 -"47344|273766|70329104160040|key_39015" -"47344|273766|70329104160040|key_89793" -"47344|273766|70329104160040|key_92937" +1) "key_39015" +2) "key_89793" +3) "key_92937" ``` @return -@array-reply: From 0 to _count_ key names in a Redis array reply. +@array-reply: From 0 to *count* key names in a Redis array reply. diff --git a/iredis/data/commands/cluster-help.md b/iredis/data/commands/cluster-help.md new file mode 100644 index 0000000..3b1e159 --- /dev/null +++ b/iredis/data/commands/cluster-help.md @@ -0,0 +1,5 @@ +The `CLUSTER HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/cluster-info.md b/iredis/data/commands/cluster-info.md index 550dd0c..372cc73 100644 --- a/iredis/data/commands/cluster-info.md +++ b/iredis/data/commands/cluster-info.md @@ -1,6 +1,5 @@ -`CLUSTER INFO` provides `INFO` style information about Redis Cluster vital -parameters. The following is a sample output, followed by the description of -each field reported. +`CLUSTER INFO` provides `INFO` style information about Redis Cluster vital parameters. 
+The following fields are always present in the reply: ``` cluster_state:ok @@ -14,43 +13,40 @@ cluster_current_epoch:6 cluster_my_epoch:2 cluster_stats_messages_sent:1483972 cluster_stats_messages_received:1483968 +total_cluster_links_buffer_limit_exceeded:0 ``` -- `cluster_state`: State is `ok` if the node is able to receive queries. `fail` - if there is at least one hash slot which is unbound (no node associated), in - error state (node serving it is flagged with FAIL flag), or if the majority of - masters can't be reached by this node. -- `cluster_slots_assigned`: Number of slots which are associated to some node - (not unbound). This number should be 16384 for the node to work properly, - which means that each hash slot should be mapped to a node. -- `cluster_slots_ok`: Number of hash slots mapping to a node not in `FAIL` or - `PFAIL` state. -- `cluster_slots_pfail`: Number of hash slots mapping to a node in `PFAIL` - state. Note that those hash slots still work correctly, as long as the `PFAIL` - state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` - only means that we are currently not able to talk with the node, but may be - just a transient error. -- `cluster_slots_fail`: Number of hash slots mapping to a node in `FAIL` state. - If this number is not zero the node is not able to serve queries unless - `cluster-require-full-coverage` is set to `no` in the configuration. -- `cluster_known_nodes`: The total number of known nodes in the cluster, - including nodes in `HANDSHAKE` state that may not currently be proper members - of the cluster. -- `cluster_size`: The number of master nodes serving at least one hash slot in - the cluster. -- `cluster_current_epoch`: The local `Current Epoch` variable. This is used in - order to create unique increasing version numbers during fail overs. -- `cluster_my_epoch`: The `Config Epoch` of the node we are talking with. This - is the current configuration version assigned to this node. 
-- `cluster_stats_messages_sent`: Number of messages sent via the cluster - node-to-node binary bus. -- `cluster_stats_messages_received`: Number of messages received via the cluster - node-to-node binary bus. +* `cluster_state`: State is `ok` if the node is able to receive queries. `fail` if there is at least one hash slot which is unbound (no node associated), in error state (node serving it is flagged with FAIL flag), or if the majority of masters can't be reached by this node. +* `cluster_slots_assigned`: Number of slots which are associated to some node (not unbound). This number should be 16384 for the node to work properly, which means that each hash slot should be mapped to a node. +* `cluster_slots_ok`: Number of hash slots mapping to a node not in `FAIL` or `PFAIL` state. +* `cluster_slots_pfail`: Number of hash slots mapping to a node in `PFAIL` state. Note that those hash slots still work correctly, as long as the `PFAIL` state is not promoted to `FAIL` by the failure detection algorithm. `PFAIL` only means that we are currently not able to talk with the node, but may be just a transient error. +* `cluster_slots_fail`: Number of hash slots mapping to a node in `FAIL` state. If this number is not zero the node is not able to serve queries unless `cluster-require-full-coverage` is set to `no` in the configuration. +* `cluster_known_nodes`: The total number of known nodes in the cluster, including nodes in `HANDSHAKE` state that may not currently be proper members of the cluster. +* `cluster_size`: The number of master nodes serving at least one hash slot in the cluster. +* `cluster_current_epoch`: The local `Current Epoch` variable. This is used in order to create unique increasing version numbers during fail overs. +* `cluster_my_epoch`: The `Config Epoch` of the node we are talking with. This is the current configuration version assigned to this node. +* `cluster_stats_messages_sent`: Number of messages sent via the cluster node-to-node binary bus. 
+* `cluster_stats_messages_received`: Number of messages received via the cluster node-to-node binary bus. +* `total_cluster_links_buffer_limit_exceeded`: Accumulated count of cluster links freed due to exceeding the `cluster-link-sendbuf-limit` configuration. -More information about the Current Epoch and Config Epoch variables are -available in the Redis Cluster specification document. +The following message-related fields may be included in the reply if the value is not 0: +Each message type includes statistics on the number of messages sent and received. +Here are the explanation of these fields: + +* `cluster_stats_messages_ping_sent` and `cluster_stats_messages_ping_received`: Cluster bus PING (not to be confused with the client command `PING`). +* `cluster_stats_messages_pong_sent` and `cluster_stats_messages_pong_received`: PONG (reply to PING). +* `cluster_stats_messages_meet_sent` and `cluster_stats_messages_meet_received`: Handshake message sent to a new node, either through gossip or `CLUSTER MEET`. +* `cluster_stats_messages_fail_sent` and `cluster_stats_messages_fail_received`: Mark node xxx as failing. +* `cluster_stats_messages_publish_sent` and `cluster_stats_messages_publish_received`: Pub/Sub Publish propagation, see [Pubsub](/topics/pubsub#pubsub). +* `cluster_stats_messages_auth-req_sent` and `cluster_stats_messages_auth-req_received`: Replica initiated leader election to replace its master. +* `cluster_stats_messages_auth-ack_sent` and `cluster_stats_messages_auth-ack_received`: Message indicating a vote during leader election. +* `cluster_stats_messages_update_sent` and `cluster_stats_messages_update_received`: Another node slots configuration. +* `cluster_stats_messages_mfstart_sent` and `cluster_stats_messages_mfstart_received`: Pause clients for manual failover. +* `cluster_stats_messages_module_sent` and `cluster_stats_messages_module_received`: Module cluster API message. 
+* `cluster_stats_messages_publishshard_sent` and `cluster_stats_messages_publishshard_received`: Pub/Sub Publish shard propagation, see [Sharded Pubsub](/topics/pubsub#sharded-pubsub). + +More information about the Current Epoch and Config Epoch variables are available in the [Redis Cluster specification document](/topics/cluster-spec#cluster-current-epoch). @return -@bulk-string-reply: A map between named fields and values in the form of -`<field>:<value>` lines separated by newlines composed by the two bytes `CRLF`. +@bulk-string-reply: A map between named fields and values in the form of `<field>:<value>` lines separated by newlines composed by the two bytes `CRLF`. diff --git a/iredis/data/commands/cluster-keyslot.md b/iredis/data/commands/cluster-keyslot.md index 5f08e79..7e03587 100644 --- a/iredis/data/commands/cluster-keyslot.md +++ b/iredis/data/commands/cluster-keyslot.md @@ -1,31 +1,23 @@ -Returns an integer identifying the hash slot the specified key hashes to. This -command is mainly useful for debugging and testing, since it exposes via an API -the underlying Redis implementation of the hashing algorithm. Example use cases -for this command: +Returns an integer identifying the hash slot the specified key hashes to. +This command is mainly useful for debugging and testing, since it exposes +via an API the underlying Redis implementation of the hashing algorithm. +Example use cases for this command: -1. Client libraries may use Redis in order to test their own hashing algorithm, - generating random keys and hashing them with both their local implementation - and using Redis `CLUSTER KEYSLOT` command, then checking if the result is the - same. -2. Humans may use this command in order to check what is the hash slot, and then - the associated Redis Cluster node, responsible for a given key. +1. 
Client libraries may use Redis in order to test their own hashing algorithm, generating random keys and hashing them with both their local implementation and using Redis `CLUSTER KEYSLOT` command, then checking if the result is the same. +2. Humans may use this command in order to check what is the hash slot, and then the associated Redis Cluster node, responsible for a given key. ## Example ``` > CLUSTER KEYSLOT somekey -11058 +(integer) 11058 > CLUSTER KEYSLOT foo{hash_tag} (integer) 2515 > CLUSTER KEYSLOT bar{hash_tag} (integer) 2515 ``` -Note that the command implements the full hashing algorithm, including support -for **hash tags**, that is the special property of Redis Cluster key hashing -algorithm, of hashing just what is between `{` and `}` if such a pattern is -found inside the key name, in order to force multiple keys to be handled by the -same node. +Note that the command implements the full hashing algorithm, including support for **hash tags**, that is the special property of Redis Cluster key hashing algorithm, of hashing just what is between `{` and `}` if such a pattern is found inside the key name, in order to force multiple keys to be handled by the same node. @return diff --git a/iredis/data/commands/cluster-links.md b/iredis/data/commands/cluster-links.md new file mode 100644 index 0000000..7b33762 --- /dev/null +++ b/iredis/data/commands/cluster-links.md @@ -0,0 +1,48 @@ +Each node in a Redis Cluster maintains a pair of long-lived TCP link with each peer in the cluster: One for sending outbound messages towards the peer and one for receiving inbound messages from the peer. + +`CLUSTER LINKS` outputs information of all such peer links as an array, where each array element is a map that contains attributes and their values for an individual link. 
+ +@examples + +The following is an example output: + +``` +> CLUSTER LINKS +1) 1) "direction" + 2) "to" + 3) "node" + 4) "8149d745fa551e40764fecaf7cab9dbdf6b659ae" + 5) "create-time" + 6) (integer) 1639442739375 + 7) "events" + 8) "rw" + 9) "send-buffer-allocated" + 10) (integer) 4512 + 11) "send-buffer-used" + 12) (integer) 0 +2) 1) "direction" + 2) "from" + 3) "node" + 4) "8149d745fa551e40764fecaf7cab9dbdf6b659ae" + 5) "create-time" + 6) (integer) 1639442739411 + 7) "events" + 8) "r" + 9) "send-buffer-allocated" + 10) (integer) 0 + 11) "send-buffer-used" + 12) (integer) 0 +``` + +Each map is composed of the following attributes of the corresponding cluster link and their values: + +1. `direction`: This link is established by the local node `to` the peer, or accepted by the local node `from` the peer. +2. `node`: The node id of the peer. +3. `create-time`: Creation time of the link. (In the case of a `to` link, this is the time when the TCP link is created by the local node, not the time when it is actually established.) +4. `events`: Events currently registered for the link. `r` means readable event, `w` means writable event. +5. `send-buffer-allocated`: Allocated size of the link's send buffer, which is used to buffer outgoing messages toward the peer. +6. `send-buffer-used`: Size of the portion of the link's send buffer that is currently holding data(messages). + +@return + +@array-reply: An array of maps where each map contains various attributes and their values of a cluster link. diff --git a/iredis/data/commands/cluster-meet.md b/iredis/data/commands/cluster-meet.md index 3402faa..b33c9fb 100644 --- a/iredis/data/commands/cluster-meet.md +++ b/iredis/data/commands/cluster-meet.md @@ -2,54 +2,41 @@ support enabled, into a working cluster. 
The basic idea is that nodes by default don't trust each other, and are -considered unknown, so that it is unlikely that different cluster nodes will mix -into a single one because of system administration errors or network addresses -modifications. +considered unknown, so that it is unlikely that different cluster nodes will +mix into a single one because of system administration errors or network +addresses modifications. So in order for a given node to accept another one into the list of nodes composing a Redis Cluster, there are only two ways: -1. The system administrator sends a `CLUSTER MEET` command to force a node to - meet another one. -2. An already known node sends a list of nodes in the gossip section that we are - not aware of. If the receiving node trusts the sending node as a known node, - it will process the gossip section and send an handshake to the nodes that - are still not known. +1. The system administrator sends a `CLUSTER MEET` command to force a node to meet another one. +2. An already known node sends a list of nodes in the gossip section that we are not aware of. If the receiving node trusts the sending node as a known node, it will process the gossip section and send a handshake to the nodes that are still not known. -Note that Redis Cluster needs to form a full mesh (each node is connected with -each other node), but in order to create a cluster, there is no need to send all -the `CLUSTER MEET` commands needed to form the full mesh. What matter is to send -enough `CLUSTER MEET` messages so that each node can reach each other node -through a _chain of known nodes_. Thanks to the exchange of gossip information -in heartbeat packets, the missing links will be created. +Note that Redis Cluster needs to form a full mesh (each node is connected with each other node), but in order to create a cluster, there is no need to send all the `CLUSTER MEET` commands needed to form the full mesh. 
What matters is to send enough `CLUSTER MEET` messages so that each node can reach each other node through a *chain of known nodes*. Thanks to the exchange of gossip information in heartbeat packets, the missing links will be created.
 
-So, if we link node A with node B via `CLUSTER MEET`, and B with C, A and C will
-find their ways to handshake and create a link.
+So, if we link node A with node B via `CLUSTER MEET`, and B with C, A and C will find their ways to handshake and create a link.
 
-Another example: if we imagine a cluster formed of the following four nodes
-called A, B, C and D, we may send just the following set of commands to A:
+Another example: if we imagine a cluster formed of the following four nodes called A, B, C and D, we may send just the following set of commands to A:
 
 1. `CLUSTER MEET B-ip B-port`
 2. `CLUSTER MEET C-ip C-port`
 3. `CLUSTER MEET D-ip D-port`
 
-As a side effect of `A` knowing and being known by all the other nodes, it will
-send gossip sections in the heartbeat packets that will allow each other node to
-create a link with each other one, forming a full mesh in a matter of seconds,
-even if the cluster is large.
+As a side effect of `A` knowing and being known by all the other nodes, it will send gossip sections in the heartbeat packets that will allow each other node to create a link with each other one, forming a full mesh in a matter of seconds, even if the cluster is large.
 
-Moreover `CLUSTER MEET` does not need to be reciprocal. If I send the command to
-A in order to join B, I don't need to also send it to B in order to join A.
+Moreover `CLUSTER MEET` does not need to be reciprocal. If I send the command to A in order to join B, I don't need to also send it to B in order to join A.
+
+If the optional `cluster_bus_port` argument is not provided, the default of port + 10000 will be used.
## Implementation details: MEET and PING packets When a given node receives a `CLUSTER MEET` message, the node specified in the -command still does not know the node we sent the command to. So in order for the -node to force the receiver to accept it as a trusted node, it sends a `MEET` -packet instead of a `PING` packet. The two packets have exactly the same format, -but the former forces the receiver to acknowledge the node as trusted. +command still does not know the node we sent the command to. So in order for +the node to force the receiver to accept it as a trusted node, it sends a +`MEET` packet instead of a `PING` packet. The two packets have exactly the +same format, but the former forces the receiver to acknowledge the node as +trusted. @return -@simple-string-reply: `OK` if the command was successful. If the address or port -specified are invalid an error is returned. +@simple-string-reply: `OK` if the command was successful. If the address or port specified are invalid an error is returned. diff --git a/iredis/data/commands/cluster-myid.md b/iredis/data/commands/cluster-myid.md index 1ff5c0f..02e8b1d 100644 --- a/iredis/data/commands/cluster-myid.md +++ b/iredis/data/commands/cluster-myid.md @@ -1,8 +1,7 @@ Returns the node's id. -The `CLUSTER MYID` command returns the unique, auto-generated identifier that is -associated with the connected cluster node. +The `CLUSTER MYID` command returns the unique, auto-generated identifier that is associated with the connected cluster node. @return -@bulk-string-reply: The node id. +@bulk-string-reply: The node id.
\ No newline at end of file diff --git a/iredis/data/commands/cluster-nodes.md b/iredis/data/commands/cluster-nodes.md index 0bdbbba..2ec706c 100644 --- a/iredis/data/commands/cluster-nodes.md +++ b/iredis/data/commands/cluster-nodes.md @@ -4,20 +4,21 @@ nodes, their flags, properties and assigned slots, and so forth. `CLUSTER NODES` provides all this information, that is, the current cluster configuration of the node we are contacting, in a serialization format which -happens to be exactly the same as the one used by Redis Cluster itself in order -to store on disk the cluster state (however the on disk cluster state has a few -additional info appended at the end). +happens to be exactly the same as the one used by Redis Cluster itself in +order to store on disk the cluster state (however the on disk cluster state +has a few additional info appended at the end). -Note that normally clients willing to fetch the map between Cluster hash slots -and node addresses should use `CLUSTER SLOTS` instead. `CLUSTER NODES`, that -provides more information, should be used for administrative tasks, debugging, -and configuration inspections. It is also used by `redis-cli` in order to manage -a cluster. +Note that normally clients willing to fetch the map between Cluster +hash slots and node addresses should use `CLUSTER SLOTS` instead. +`CLUSTER NODES`, that provides more information, should be used for +administrative tasks, debugging, and configuration inspections. +It is also used by `redis-cli` in order to manage a cluster. ## Serialization format -The output of the command is just a space-separated CSV string, where each line -represents a node in the cluster. The following is an example of output: +The output of the command is just a space-separated CSV string, where +each line represents a node in the cluster. 
The following is an example +of output: ``` 07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004@31004 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected @@ -36,113 +37,74 @@ Each line is composed of the following fields: The meaning of each filed is the following: -1. `id`: The node ID, a 40 characters random string generated when a node is - created and never changed again (unless `CLUSTER RESET HARD` is used). -2. `ip:port@cport`: The node address where clients should contact the node to - run queries. -3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`, - `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are - explained in detail in the next section. -4. `master`: If the node is a replica, and the master is known, the master node - ID, otherwise the "-" character. -5. `ping-sent`: Milliseconds unix time at which the currently active ping was - sent, or zero if there are no pending pings. +1. `id`: The node ID, a 40 characters random string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). +2. `ip:port@cport`: The node address where clients should contact the node to run queries. +3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are explained in detail in the next section. +4. `master`: If the node is a replica, and the master is known, the master node ID, otherwise the "-" character. +5. `ping-sent`: Milliseconds unix time at which the currently active ping was sent, or zero if there are no pending pings. 6. `pong-recv`: Milliseconds unix time the last pong was received. -7. `config-epoch`: The configuration epoch (or version) of the current node (or - of the current master if the node is a replica). Each time there is a - failover, a new, unique, monotonically increasing configuration epoch is - created. 
If multiple nodes claim to serve the same hash slots, the one with - higher configuration epoch wins. -8. `link-state`: The state of the link used for the node-to-node cluster bus. We - use this link to communicate with the node. Can be `connected` or - `disconnected`. -9. `slot`: A hash slot number or range. Starting from argument number 9, but - there may be up to 16384 entries in total (limit never reached). This is the - list of hash slots served by this node. If the entry is just a number, is - parsed as such. If it is a range, it is in the form `start-end`, and means - that the node is responsible for all the hash slots from `start` to `end` - including the start and end values. +7. `config-epoch`: The configuration epoch (or version) of the current node (or of the current master if the node is a replica). Each time there is a failover, a new, unique, monotonically increasing configuration epoch is created. If multiple nodes claim to serve the same hash slots, the one with higher configuration epoch wins. +8. `link-state`: The state of the link used for the node-to-node cluster bus. We use this link to communicate with the node. Can be `connected` or `disconnected`. +9. `slot`: A hash slot number or range. Starting from argument number 9, but there may be up to 16384 entries in total (limit never reached). This is the list of hash slots served by this node. If the entry is just a number, is parsed as such. If it is a range, it is in the form `start-end`, and means that the node is responsible for all the hash slots from `start` to `end` including the start and end values. Meaning of the flags (field number 3): -- `myself`: The node you are contacting. -- `master`: Node is a master. -- `slave`: Node is a replica. -- `fail?`: Node is in `PFAIL` state. Not reachable for the node you are - contacting, but still logically reachable (not in `FAIL` state). -- `fail`: Node is in `FAIL` state. 
It was not reachable for multiple nodes that - promoted the `PFAIL` state to `FAIL`. -- `handshake`: Untrusted node, we are handshaking. -- `noaddr`: No address known for this node. -- `nofailover`: Replica will not try to failover. -- `noflags`: No flags at all. +* `myself`: The node you are contacting. +* `master`: Node is a master. +* `slave`: Node is a replica. +* `fail?`: Node is in `PFAIL` state. Not reachable for the node you are contacting, but still logically reachable (not in `FAIL` state). +* `fail`: Node is in `FAIL` state. It was not reachable for multiple nodes that promoted the `PFAIL` state to `FAIL`. +* `handshake`: Untrusted node, we are handshaking. +* `noaddr`: No address known for this node. +* `nofailover`: Replica will not try to failover. +* `noflags`: No flags at all. ## Notes on published config epochs Replicas broadcast their master's config epochs (in order to get an `UPDATE` -message if they are found to be stale), so the real config epoch of the replica -(which is meaningless more or less, since they don't serve hash slots) can be -only obtained checking the node flagged as `myself`, which is the entry of the -node we are asking to generate `CLUSTER NODES` output. The other replicas epochs -reflect what they publish in heartbeat packets, which is, the configuration -epoch of the masters they are currently replicating. +message if they are found to be stale), so the real config epoch of the +replica (which is meaningless more or less, since they don't serve hash slots) +can be only obtained checking the node flagged as `myself`, which is the entry +of the node we are asking to generate `CLUSTER NODES` output. The other +replicas epochs reflect what they publish in heartbeat packets, which is, the +configuration epoch of the masters they are currently replicating. 
## Special slot entries
 
-Normally hash slots associated to a given node are in one of the following
-formats, as already explained above:
+Normally hash slots associated to a given node are in one of the following formats,
+as already explained above:
 
 1. Single number: 3894
 2. Range: 3900-4000
 
-However node hash slots can be in a special state, used in order to communicate
-errors after a node restart (mismatch between the keys in the AOF/RDB file, and
-the node hash slots configuration), or when there is a resharding operation in
-progress. This two states are **importing** and **migrating**.
+However node hash slots can be in a special state, used in order to communicate errors after a node restart (mismatch between the keys in the AOF/RDB file, and the node hash slots configuration), or when there is a resharding operation in progress. These two states are **importing** and **migrating**.
 
-The meaning of the two states is explained in the Redis Specification, however
-the gist of the two states is the following:
+The meaning of the two states is explained in the Redis Specification, however the gist of the two states is the following:
 
-- **Importing** slots are yet not part of the nodes hash slot, there is a
-  migration in progress. The node will accept queries about these slots only if
-  the `ASK` command is used.
-- **Migrating** slots are assigned to the node, but are being migrated to some
-  other node. The node will accept queries if all the keys in the command exist
-  already, otherwise it will emit what is called an **ASK redirection**, to
-  force new keys creation directly in the importing node.
+* **Importing** slots are yet not part of the nodes hash slot, there is a migration in progress. The node will accept queries about these slots only if the `ASK` command is used.
+* **Migrating** slots are assigned to the node, but are being migrated to some other node.
The node will accept queries if all the keys in the command exist already, otherwise it will emit what is called an **ASK redirection**, to force new keys creation directly in the importing node. -Importing and migrating slots are emitted in the `CLUSTER NODES` output as -follows: +Importing and migrating slots are emitted in the `CLUSTER NODES` output as follows: -- **Importing slot:** `[slot_number-<-importing_from_node_id]` -- **Migrating slot:** `[slot_number->-migrating_to_node_id]` +* **Importing slot:** `[slot_number-<-importing_from_node_id]` +* **Migrating slot:** `[slot_number->-migrating_to_node_id]` The following are a few examples of importing and migrating slots: -- `[93-<-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f]` -- `[1002-<-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1]` -- `[77->-e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca]` -- `[16311->-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f]` +* `[93-<-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f]` +* `[1002-<-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1]` +* `[77->-e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca]` +* `[16311->-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f]` -Note that the format does not have any space, so `CLUSTER NODES` output format -is plain CSV with space as separator even when this special slots are emitted. -However a complete parser for the format should be able to handle them. +Note that the format does not have any space, so `CLUSTER NODES` output format is plain CSV with space as separator even when this special slots are emitted. However a complete parser for the format should be able to handle them. Note that: -1. Migration and importing slots are only added to the node flagged as `myself`. - This information is local to a node, for its own slots. -2. Importing and migrating slots are provided as **additional info**. 
If the - node has a given hash slot assigned, it will be also a plain number in the - list of hash slots, so clients that don't have a clue about hash slots - migrations can just skip this special fields. +1. Migration and importing slots are only added to the node flagged as `myself`. This information is local to a node, for its own slots. +2. Importing and migrating slots are provided as **additional info**. If the node has a given hash slot assigned, it will be also a plain number in the list of hash slots, so clients that don't have a clue about hash slots migrations can just skip this special fields. @return @bulk-string-reply: The serialized cluster configuration. -**A note about the word slave used in this man page and command name**: Starting -with Redis 5, if not for backward compatibility, the Redis project no longer -uses the word slave. Unfortunately in this command the word slave is part of the -protocol, so we'll be able to remove such occurrences only when this API will be -naturally deprecated. +**A note about the word slave used in this man page and command name**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. diff --git a/iredis/data/commands/cluster-replicas.md b/iredis/data/commands/cluster-replicas.md index 678638a..4e6192e 100644 --- a/iredis/data/commands/cluster-replicas.md +++ b/iredis/data/commands/cluster-replicas.md @@ -1,15 +1,14 @@ The command provides a list of replica nodes replicating from the specified -master node. The list is provided in the same format used by `CLUSTER NODES` -(please refer to its documentation for the specification of the format). +master node. The list is provided in the same format used by `CLUSTER NODES` (please refer to its documentation for the specification of the format). 
-The command will fail if the specified node is not known or if it is not a -master according to the node table of the node receiving the command. +The command will fail if the specified node is not known or if it is not +a master according to the node table of the node receiving the command. -Note that if a replica is added, moved, or removed from a given master node, and -we ask `CLUSTER REPLICAS` to a node that has not yet received the configuration -update, it may show stale information. However eventually (in a matter of -seconds if there are no network partitions) all the nodes will agree about the -set of nodes associated with a given master. +Note that if a replica is added, moved, or removed from a given master node, +and we ask `CLUSTER REPLICAS` to a node that has not yet received the +configuration update, it may show stale information. However eventually +(in a matter of seconds if there are no network partitions) all the nodes +will agree about the set of nodes associated with a given master. @return diff --git a/iredis/data/commands/cluster-replicate.md b/iredis/data/commands/cluster-replicate.md index ab5cf5c..5b403aa 100644 --- a/iredis/data/commands/cluster-replicate.md +++ b/iredis/data/commands/cluster-replicate.md @@ -1,6 +1,6 @@ -The command reconfigures a node as a replica of the specified master. If the -node receiving the command is an _empty master_, as a side effect of the -command, the node role is changed from master to replica. +The command reconfigures a node as a replica of the specified master. +If the node receiving the command is an *empty master*, as a side effect +of the command, the node role is changed from master to replica. Once a node is turned into the replica of another master node, there is no need to inform the other cluster nodes about the change: heartbeat packets exchanged @@ -9,21 +9,18 @@ between nodes will propagate the new configuration automatically. A replica will always accept the command, assuming that: 1. 
The specified node ID exists in its nodes table. -2. The specified node ID does not identify the instance we are sending the - command to. +2. The specified node ID does not identify the instance we are sending the command to. 3. The specified node ID is a master. -If the node receiving the command is not already a replica, but is a master, the -command will only succeed, and the node will be converted into a replica, only -if the following additional conditions are met: +If the node receiving the command is not already a replica, but is a master, +the command will only succeed, and the node will be converted into a replica, +only if the following additional conditions are met: 1. The node is not serving any hash slots. 2. The node is empty, no keys are stored at all in the key space. -If the command succeeds the new replica will immediately try to contact its -master in order to replicate from it. +If the command succeeds the new replica will immediately try to contact its master in order to replicate from it. @return -@simple-string-reply: `OK` if the command was executed successfully, otherwise -an error is returned. +@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. diff --git a/iredis/data/commands/cluster-reset.md b/iredis/data/commands/cluster-reset.md index 4186725..02ffe9e 100644 --- a/iredis/data/commands/cluster-reset.md +++ b/iredis/data/commands/cluster-reset.md @@ -1,29 +1,25 @@ -Reset a Redis Cluster node, in a more or less drastic way depending on the reset -type, that can be **hard** or **soft**. Note that this command **does not work -for masters if they hold one or more keys**, in that case to completely reset a -master node keys must be removed first, e.g. by using `FLUSHALL` first, and then -`CLUSTER RESET`. +Reset a Redis Cluster node, in a more or less drastic way depending on the +reset type, that can be **hard** or **soft**. 
Note that this command +**does not work for masters if they hold one or more keys**, in that case +to completely reset a master node keys must be removed first, e.g. by using `FLUSHALL` first, +and then `CLUSTER RESET`. Effects on the node: 1. All the other nodes in the cluster are forgotten. -2. All the assigned / open slots are reset, so the slots-to-nodes mapping is - totally cleared. -3. If the node is a replica it is turned into an (empty) master. Its dataset is - flushed, so at the end the node will be an empty master. +2. All the assigned / open slots are reset, so the slots-to-nodes mapping is totally cleared. +3. If the node is a replica it is turned into an (empty) master. Its dataset is flushed, so at the end the node will be an empty master. 4. **Hard reset only**: a new Node ID is generated. 5. **Hard reset only**: `currentEpoch` and `configEpoch` vars are set to 0. -6. The new configuration is persisted on disk in the node cluster configuration - file. +6. The new configuration is persisted on disk in the node cluster configuration file. -This command is mainly useful to re-provision a Redis Cluster node in order to -be used in the context of a new, different cluster. The command is also -extensively used by the Redis Cluster testing framework in order to reset the -state of the cluster every time a new test unit is executed. +This command is mainly useful to re-provision a Redis Cluster node +in order to be used in the context of a new, different cluster. The command +is also extensively used by the Redis Cluster testing framework in order to +reset the state of the cluster every time a new test unit is executed. If no reset type is specified, the default is **soft**. @return -@simple-string-reply: `OK` if the command was successful. Otherwise an error is -returned. +@simple-string-reply: `OK` if the command was successful. Otherwise an error is returned. 
diff --git a/iredis/data/commands/cluster-saveconfig.md b/iredis/data/commands/cluster-saveconfig.md index 115d88a..31308c2 100644 --- a/iredis/data/commands/cluster-saveconfig.md +++ b/iredis/data/commands/cluster-saveconfig.md @@ -1,14 +1,14 @@ Forces a node to save the `nodes.conf` configuration on disk. Before to return -the command calls `fsync(2)` in order to make sure the configuration is flushed -on the computer disk. +the command calls `fsync(2)` in order to make sure the configuration is +flushed on the computer disk. -This command is mainly used in the event a `nodes.conf` node state file gets -lost / deleted for some reason, and we want to generate it again from scratch. -It can also be useful in case of mundane alterations of a node cluster +This command is mainly used in the event a `nodes.conf` node state file +gets lost / deleted for some reason, and we want to generate it again from +scratch. It can also be useful in case of mundane alterations of a node cluster configuration via the `CLUSTER` command in order to ensure the new configuration -is persisted on disk, however all the commands should normally be able to auto -schedule to persist the configuration on disk when it is important to do so for -the correctness of the system in the event of a restart. +is persisted on disk, however all the commands should normally be able to +auto schedule to persist the configuration on disk when it is important +to do so for the correctness of the system in the event of a restart. @return diff --git a/iredis/data/commands/cluster-set-config-epoch.md b/iredis/data/commands/cluster-set-config-epoch.md index b64fffa..71f458f 100644 --- a/iredis/data/commands/cluster-set-config-epoch.md +++ b/iredis/data/commands/cluster-set-config-epoch.md @@ -1,25 +1,25 @@ -This command sets a specific _config epoch_ in a fresh node. It only works when: +This command sets a specific *config epoch* in a fresh node. It only works when: 1. The nodes table of the node is empty. 
-2. The node current _config epoch_ is zero. +2. The node current *config epoch* is zero. These prerequisites are needed since usually, manually altering the configuration epoch of a node is unsafe, we want to be sure that the node with the higher configuration epoch value (that is the last that failed over) wins over other nodes in claiming the hash slots ownership. -However there is an exception to this rule, and it is when a new cluster is -created from scratch. Redis Cluster _config epoch collision resolution_ -algorithm can deal with new nodes all configured with the same configuration at -startup, but this process is slow and should be the exception, only to make sure -that whatever happens, two more nodes eventually always move away from the state -of having the same configuration epoch. +However there is an exception to this rule, and it is when a new +cluster is created from scratch. Redis Cluster *config epoch collision +resolution* algorithm can deal with new nodes all configured with the +same configuration at startup, but this process is slow and should be +the exception, only to make sure that whatever happens, two more +nodes eventually always move away from the state of having the same +configuration epoch. -So, using `CONFIG SET-CONFIG-EPOCH`, when a new cluster is created, we can -assign a different progressive configuration epoch to each node before joining -the cluster together. +So, using `CLUSTER SET-CONFIG-EPOCH`, when a new cluster is created, we can +assign a different progressive configuration epoch to each node before +joining the cluster together. @return -@simple-string-reply: `OK` if the command was executed successfully, otherwise -an error is returned. +@simple-string-reply: `OK` if the command was executed successfully, otherwise an error is returned. 
diff --git a/iredis/data/commands/cluster-setslot.md b/iredis/data/commands/cluster-setslot.md index 213acba..e712d36 100644 --- a/iredis/data/commands/cluster-setslot.md +++ b/iredis/data/commands/cluster-setslot.md @@ -1,133 +1,85 @@ -`CLUSTER SETSLOT` is responsible of changing the state of a hash slot in the -receiving node in different ways. It can, depending on the subcommand used: +`CLUSTER SETSLOT` is responsible for changing the state of a hash slot in the receiving node in different ways. It can, depending on the subcommand used: -1. `MIGRATING` subcommand: Set a hash slot in _migrating_ state. -2. `IMPORTING` subcommand: Set a hash slot in _importing_ state. +1. `MIGRATING` subcommand: Set a hash slot in *migrating* state. +2. `IMPORTING` subcommand: Set a hash slot in *importing* state. 3. `STABLE` subcommand: Clear any importing / migrating state from hash slot. 4. `NODE` subcommand: Bind the hash slot to a different node. -The command with its set of subcommands is useful in order to start and end -cluster live resharding operations, which are accomplished by setting a hash -slot in migrating state in the source node, and importing state in the -destination node. +The command with its set of subcommands is useful in order to start and end cluster live resharding operations, which are accomplished by setting a hash slot in migrating state in the source node, and importing state in the destination node. -Each subcommand is documented below. At the end you'll find a description of how -live resharding is performed using this command and other related commands. +Each subcommand is documented below. At the end you'll find a description of +how live resharding is performed using this command and other related commands. ## CLUSTER SETSLOT `<slot>` MIGRATING `<destination-node-id>` -This subcommand sets a slot to _migrating_ state. 
In order to set a slot in this -state, the node receiving the command must be the hash slot owner, otherwise an -error is returned. +This subcommand sets a slot to *migrating* state. In order to set a slot +in this state, the node receiving the command must be the hash slot owner, +otherwise an error is returned. When a slot is set in migrating state, the node changes behavior in the following way: -1. If a command is received about an existing key, the command is processed as - usually. -2. If a command is received about a key that does not exists, an `ASK` - redirection is emitted by the node, asking the client to retry only that - specific query into `destination-node`. In this case the client should not - update its hash slot to node mapping. -3. If the command contains multiple keys, in case none exist, the behavior is - the same as point 2, if all exist, it is the same as point 1, however if only - a partial number of keys exist, the command emits a `TRYAGAIN` error in order - for the keys interested to finish being migrated to the target node, so that - the multi keys command can be executed. +1. If a command is received about an existing key, the command is processed as usual. +2. If a command is received about a key that does not exist, an `ASK` redirection is emitted by the node, asking the client to retry only that specific query into `destination-node`. In this case the client should not update its hash slot to node mapping. +3. If the command contains multiple keys, in case none exist, the behavior is the same as point 2, if all exist, it is the same as point 1, however if only a partial number of keys exist, the command emits a `TRYAGAIN` error in order for the keys interested to finish being migrated to the target node, so that the multi keys command can be executed. 
## CLUSTER SETSLOT `<slot>` IMPORTING `<source-node-id>` -This subcommand is the reverse of `MIGRATING`, and prepares the destination node -to import keys from the specified source node. The command only works if the -node is not already owner of the specified hash slot. +This subcommand is the reverse of `MIGRATING`, and prepares the destination +node to import keys from the specified source node. The command only works if +the node is not already owner of the specified hash slot. -When a slot is set in importing state, the node changes behavior in the -following way: +When a slot is set in importing state, the node changes behavior in the following way: -1. Commands about this hash slot are refused and a `MOVED` redirection is - generated as usually, but in the case the command follows an `ASKING` - command, in this case the command is executed. +1. Commands about this hash slot are refused and a `MOVED` redirection is generated as usual, but in the case the command follows an `ASKING` command, in this case the command is executed. -In this way when a node in migrating state generates an `ASK` redirection, the -client contacts the target node, sends `ASKING`, and immediately after sends the -command. This way commands about non-existing keys in the old node or keys -already migrated to the target node are executed in the target node, so that: +In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, sends `ASKING`, and immediately after sends the command. This way commands about non-existing keys in the old node or keys already migrated to the target node are executed in the target node, so that: -1. New keys are always created in the target node. During a hash slot migration - we'll have to move only old keys, not new ones. -2. 
Commands about keys already migrated are correctly processed in the context - of the node which is the target of the migration, the new hash slot owner, in - order to guarantee consistency. -3. Without `ASKING` the behavior is the same as usually. This guarantees that - clients with a broken hash slots mapping will not write for error in the - target node, creating a new version of a key that has yet to be migrated. +1. New keys are always created in the target node. During a hash slot migration we'll have to move only old keys, not new ones. +2. Commands about keys already migrated are correctly processed in the context of the node which is the target of the migration, the new hash slot owner, in order to guarantee consistency. +3. Without `ASKING` the behavior is the same as usual. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. ## CLUSTER SETSLOT `<slot>` STABLE This subcommand just clears migrating / importing state from the slot. It is -mainly used to fix a cluster stuck in a wrong state by -`redis-cli --cluster fix`. Normally the two states are cleared automatically at -the end of the migration using the `SETSLOT ... NODE ...` subcommand as -explained in the next section. +mainly used to fix a cluster stuck in a wrong state by `redis-cli --cluster fix`. +Normally the two states are cleared automatically at the end of the migration +using the `SETSLOT ... NODE ...` subcommand as explained in the next section. ## CLUSTER SETSLOT `<slot>` NODE `<node-id>` -The `NODE` subcommand is the one with the most complex semantics. It associates -the hash slot with the specified node, however the command works only in -specific situations and has different side effects depending on the slot state. -The following is the set of pre-conditions and side effects of the command: - -1. 
If the current hash slot owner is the node receiving the command, but for - effect of the command the slot would be assigned to a different node, the - command will return an error if there are still keys for that hash slot in - the node receiving the command. -2. If the slot is in _migrating_ state, the state gets cleared when the slot is - assigned to another node. -3. If the slot was in _importing_ state in the node receiving the command, and - the command assigns the slot to this node (which happens in the target node - at the end of the resharding of a hash slot from one node to another), the - command has the following side effects: A) the _importing_ state is cleared. - B) If the node config epoch is not already the greatest of the cluster, it - generates a new one and assigns the new config epoch to itself. This way its - new hash slot ownership will win over any past configuration created by - previous failovers or slot migrations. - -It is important to note that step 3 is the only time when a Redis Cluster node -will create a new config epoch without agreement from other nodes. This only -happens when a manual configuration is operated. However it is impossible that -this creates a non-transient setup where two nodes have the same config epoch, -since Redis Cluster uses a config epoch collision resolution algorithm. +The `NODE` subcommand is the one with the most complex semantics. It +associates the hash slot with the specified node, however the command works +only in specific situations and has different side effects depending on the +slot state. The following is the set of pre-conditions and side effects of the +command: + +1. If the current hash slot owner is the node receiving the command, but for effect of the command the slot would be assigned to a different node, the command will return an error if there are still keys for that hash slot in the node receiving the command. +2. 
If the slot is in *migrating* state, the state gets cleared when the slot is assigned to another node. +3. If the slot was in *importing* state in the node receiving the command, and the command assigns the slot to this node (which happens in the target node at the end of the resharding of a hash slot from one node to another), the command has the following side effects: A) the *importing* state is cleared. B) If the node config epoch is not already the greatest of the cluster, it generates a new one and assigns the new config epoch to itself. This way its new hash slot ownership will win over any past configuration created by previous failovers or slot migrations. + +It is important to note that step 3 is the only time when a Redis Cluster node will create a new config epoch without agreement from other nodes. This only happens when a manual configuration is operated. However it is impossible that this creates a non-transient setup where two nodes have the same config epoch, since Redis Cluster uses a config epoch collision resolution algorithm. @return -@simple-string-reply: All the subcommands return `OK` if the command was -successful. Otherwise an error is returned. +@simple-string-reply: All the subcommands return `OK` if the command was successful. Otherwise an error is returned. ## Redis Cluster live resharding explained -The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in -order to migrate all the keys contained in one hash slot from one node to -another. This is how the migration is orchestrated, with the help of other -commands as well. We'll call the node that has the current ownership of the hash -slot the `source` node, and the node where we want to migrate the `destination` -node. - -1. Set the destination node slot to _importing_ state using - `CLUSTER SETSLOT <slot> IMPORTING <source-node-id>`. -2. Set the source node slot to _migrating_ state using - `CLUSTER SETSLOT <slot> MIGRATING <destination-node-id>`. -3. 
Get keys from the source node with `CLUSTER GETKEYSINSLOT` command and move - them into the destination node using the `MIGRATE` command. -4. Use `CLUSTER SETSLOT <slot> NODE <destination-node-id>` in the source or - destination. +The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in order to migrate all the keys contained in one hash slot from one node to another. This is how the migration is orchestrated, with the help of other commands as well. We'll call the node that has the current ownership of the hash slot the `source` node, and the node where we want to migrate the `destination` node. + +1. Set the destination node slot to *importing* state using `CLUSTER SETSLOT <slot> IMPORTING <source-node-id>`. +2. Set the source node slot to *migrating* state using `CLUSTER SETSLOT <slot> MIGRATING <destination-node-id>`. +3. Get keys from the source node with `CLUSTER GETKEYSINSLOT` command and move them into the destination node using the `MIGRATE` command. +4. Send `CLUSTER SETSLOT <slot> NODE <destination-node-id>` to the destination node. +5. Send `CLUSTER SETSLOT <slot> NODE <destination-node-id>` to the source node. +6. Send `CLUSTER SETSLOT <slot> NODE <destination-node-id>` to the other master nodes (optional). Notes: -- The order of step 1 and 2 is important. We want the destination node to be - ready to accept `ASK` redirections when the source node is configured to - redirect. -- Step 4 does not technically need to use `SETSLOT` in the nodes not involved in - the resharding, since the configuration will eventually propagate itself, - however it is a good idea to do so in order to stop nodes from pointing to the - wrong node for the hash slot moved as soon as possible, resulting in less - redirections to find the right node. +* The order of step 1 and 2 is important. We want the destination node to be ready to accept `ASK` redirections when the source node is configured to redirect. +* The order of step 4 and 5 is important. 
+ The destination node is responsible for propagating the change to the rest of the cluster. + If the source node is informed before the destination node and the destination node crashes before it is set as new slot owner, the slot is left with no owner, even after a successful failover. +* Step 6, sending `SETSLOT` to the nodes not involved in the resharding, is not technically necessary since the configuration will eventually propagate itself. + However, it is a good idea to do so in order to stop nodes from pointing to the wrong node for the hash slot moved as soon as possible, resulting in less redirections to find the right node. diff --git a/iredis/data/commands/cluster-shards.md b/iredis/data/commands/cluster-shards.md new file mode 100644 index 0000000..bca6d1c --- /dev/null +++ b/iredis/data/commands/cluster-shards.md @@ -0,0 +1,153 @@ +`CLUSTER SHARDS` returns details about the shards of the cluster. +A shard is defined as a collection of nodes that serve the same set of slots and that replicate from each other. +A shard may only have a single master at a given time, but may have multiple or no replicas. +It is possible for a shard to not be serving any slots while still having replicas. + +This command replaces the `CLUSTER SLOTS` command, by providing a more efficient and extensible representation of the cluster. + +The command is suitable to be used by Redis Cluster client libraries in order to understand the topology of the cluster. +A client should issue this command on startup in order to retrieve the map associating cluster *hash slots* with actual node information. +This map should be used to direct commands to the node that is likely serving the slot associated with a given command. +In the event the command is sent to the wrong node, in that it received a '-MOVED' redirect, this command can then be used to update the topology of the cluster. + +The command returns an array of shards, with each shard containing two fields, 'slots' and 'nodes'. 
+ +The 'slots' field is a list of slot ranges served by this shard, stored as pairs of integers representing the inclusive start and end slots of the ranges. +For example, if a node owns the slots 1, 2, 3, 5, 7, 8 and 9, the slots ranges would be stored as [1-3], [5-5], [7-9]. +The slots field would therefore be represented by the following list of integers. + +``` +1) 1) "slots" + 2) 1) (integer) 1 + 2) (integer) 3 + 3) (integer) 5 + 4) (integer) 5 + 5) (integer) 7 + 6) (integer) 9 +``` + +The 'nodes' field contains a list of all nodes within the shard. +Each individual node is a map of attributes that describe the node. +Some attributes are optional and more attributes may be added in the future. +The current list of attributes: + +* id: The unique node id for this particular node. +* endpoint: The preferred endpoint to reach the node, see below for more information about the possible values of this field. +* ip: The IP address to send requests to for this node. +* hostname (optional): The announced hostname to send requests to for this node. +* port (optional): The TCP (non-TLS) port of the node. At least one of port or tls-port will be present. +* tls-port (optional): The TLS port of the node. At least one of port or tls-port will be present. +* role: The replication role of this node. +* replication-offset: The replication offset of this node. This information can be used to send commands to the most up to date replicas. +* health: Either `online`, `failed`, or `loading`. This information should be used to determine which nodes should be sent traffic. The `loading` health state should be used to know that a node is not currently eligible to serve traffic, but may be eligible in the future. + +The endpoint, along with the port, defines the location that clients should use to send requests for a given slot. 
+A NULL value for the endpoint indicates the node has an unknown endpoint and the client should connect to the same endpoint it used to send the `CLUSTER SHARDS` command but with the port returned from the command. +This unknown endpoint configuration is useful when the Redis nodes are behind a load balancer that Redis doesn't know the endpoint of. +Which endpoint is set is determined by the `cluster-preferred-endpoint-type` config. + +@return + +@array-reply: nested list of a map of hash ranges and shard nodes. + +@examples + +``` +> CLUSTER SHARDS +1) 1) "slots" + 2) 1) (integer) 10923 + 2) (integer) 11110 + 3) (integer) 11113 + 4) (integer) 16111 + 5) (integer) 16113 + 6) (integer) 16383 + 3) "nodes" + 4) 1) 1) "id" + 2) "71f058078c142a73b94767a4e78e9033d195dfb4" + 3) "port" + 4) (integer) 6381 + 5) "ip" + 6) "127.0.0.1" + 7) "role" + 8) "primary" + 9) "replication-offset" + 10) (integer) 1500 + 11) "health" + 12) "online" + 2) 1) "id" + 2) "1461967c62eab0e821ed54f2c98e594fccfd8736" + 3) "port" + 4) (integer) 7381 + 5) "ip" + 6) "127.0.0.1" + 7) "role" + 8) "replica" + 9) "replication-offset" + 10) (integer) 700 + 11) "health" + 12) "fail" +2) 1) "slots" + 2) 1) (integer) 5461 + 2) (integer) 10922 + 3) "nodes" + 4) 1) 1) "id" + 2) "9215e30cd4a71070088778080565de6ef75fd459" + 3) "port" + 4) (integer) 6380 + 5) "ip" + 6) "127.0.0.1" + 7) "role" + 8) "primary" + 9) "replication-offset" + 10) (integer) 1200 + 11) "health" + 12) "online" + 2) 1) "id" + 2) "877fa59da72cb902d0563d3d8def3437fc3a0196" + 3) "port" + 4) (integer) 7380 + 5) "ip" + 6) "127.0.0.1" + 7) "role" + 8) "replica" + 9) "replication-offset" + 10) (integer) 1100 + 11) "health" + 12) "loading" +3) 1) "slots" + 2) 1) (integer) 0 + 2) (integer) 5460 + 3) (integer) 11111 + 4) (integer) 11112 + 3) (integer) 16112 + 4) (integer) 16112 + 3) "nodes" + 4) 1) 1) "id" + 2) "b7e9acc0def782aabe6b596f67f06c73c2ffff93" + 3) "port" + 4) (integer) 7379 + 5) "ip" + 6) "127.0.0.1" + 7) "hostname" + 8) "example.com" + 9) 
"role" + 10) "replica" + 11) "replication-offset" + 12) "primary" + 13) "health" + 14) "online" + 2) 1) "id" + 2) "e2acf1a97c055fd09dcc2c0dcc62b19a6905dbc8" + 3) "port" + 4) (integer) 6379 + 5) "ip" + 6) "127.0.0.1" + 7) "hostname" + 8) "example.com" + 9) "role" + 10) "replica" + 11) "replication-offset" + 12) (integer) 0 + 13) "health" + 14) "loading" +```
\ No newline at end of file diff --git a/iredis/data/commands/cluster-slaves.md b/iredis/data/commands/cluster-slaves.md index 0b2aa49..d90eaf3 100644 --- a/iredis/data/commands/cluster-slaves.md +++ b/iredis/data/commands/cluster-slaves.md @@ -1,21 +1,16 @@ -**A note about the word slave used in this man page and command name**: Starting -with Redis 5 this command: starting with Redis version 5, if not for backward -compatibility, the Redis project no longer uses the word slave. Please use the -new command `CLUSTER REPLICAS`. The command `SLAVEOF` will continue to work for -backward compatibility. +**A note about the word slave used in this man page and command name**: starting with Redis version 5, if not for backward compatibility, the Redis project no longer uses the word slave. Please use the new command `CLUSTER REPLICAS`. The command `CLUSTER SLAVES` will continue to work for backward compatibility. The command provides a list of replica nodes replicating from the specified -master node. The list is provided in the same format used by `CLUSTER NODES` -(please refer to its documentation for the specification of the format). +master node. The list is provided in the same format used by `CLUSTER NODES` (please refer to its documentation for the specification of the format). -The command will fail if the specified node is not known or if it is not a -master according to the node table of the node receiving the command. +The command will fail if the specified node is not known or if it is not +a master according to the node table of the node receiving the command. -Note that if a replica is added, moved, or removed from a given master node, and -we ask `CLUSTER SLAVES` to a node that has not yet received the configuration -update, it may show stale information. However eventually (in a matter of -seconds if there are no network partitions) all the nodes will agree about the -set of nodes associated with a given master. 
+Note that if a replica is added, moved, or removed from a given master node, +and we ask `CLUSTER SLAVES` to a node that has not yet received the +configuration update, it may show stale information. However eventually +(in a matter of seconds if there are no network partitions) all the nodes +will agree about the set of nodes associated with a given master. @return diff --git a/iredis/data/commands/cluster-slots.md b/iredis/data/commands/cluster-slots.md index 693e6b3..68901fe 100644 --- a/iredis/data/commands/cluster-slots.md +++ b/iredis/data/commands/cluster-slots.md @@ -1,102 +1,92 @@ -`CLUSTER SLOTS` returns details about which cluster slots map to which Redis -instances. The command is suitable to be used by Redis Cluster client libraries -implementations in order to retrieve (or update when a redirection is received) -the map associating cluster _hash slots_ with actual nodes network coordinates -(composed of an IP address and a TCP port), so that when a command is received, -it can be sent to what is likely the right instance for the keys specified in -the command. +`CLUSTER SLOTS` returns details about which cluster slots map to which Redis instances. +The command is suitable to be used by Redis Cluster client libraries implementations in order to retrieve (or update when a redirection is received) the map associating cluster *hash slots* with actual nodes network information, so that when a command is received, it can be sent to what is likely the right instance for the keys specified in the command. -## Nested Result Array +The networking information for each node is an array containing the following elements: + +* Preferred endpoint (Either an IP address, hostname, or NULL) +* Port number +* The node ID +* A map of additional networking metadata + +The preferred endpoint, along with the port, defines the location that clients should use to send requests for a given slot. 
+A NULL value for the endpoint indicates the node has an unknown endpoint and the client should connect to the same endpoint it used to send the `CLUSTER SLOTS` command but with the port returned from the command. +This unknown endpoint configuration is useful when the Redis nodes are behind a load balancer that Redis doesn't know the endpoint of. +Which endpoint is set as preferred is determined by the `cluster-preferred-endpoint-type` config. +Additional networking metadata is provided as a map on the fourth argument for each node. +The following networking metadata may be returned: + +* IP: When the preferred endpoint is not set to IP. +* Hostname: When a node has an announced hostname but the primary endpoint is not set to hostname. + +## Nested Result Array Each nested result is: -- Start slot range -- End slot range -- Master for slot range represented as nested IP/Port array -- First replica of master for slot range -- Second replica -- ...continues until all replicas for this master are returned. - -Each result includes all active replicas of the master instance for the listed -slot range. Failed replicas are not returned. - -The third nested reply is guaranteed to be the IP/Port pair of the master -instance for the slot range. All IP/Port pairs after the third nested reply are -replicas of the master. - -If a cluster instance has non-contiguous slots (e.g. 1-400,900,1800-6000) then -master and replica IP/Port results will be duplicated for each top-level slot -range reply. - -**Warning:** Newer versions of Redis Cluster will output, for each Redis -instance, not just the IP and port, but also the node ID as third element of the -array. In future versions there could be more elements describing the node -better. In general a client implementation should just rely on the fact that -certain parameters are at fixed positions as specified, but more parameters may -follow and should be ignored. 
Similarly a client library should try if possible -to cope with the fact that older versions may just have the IP and port -parameter. + - Start slot range + - End slot range + - Master for slot range represented as nested networking information + - First replica of master for slot range + - Second replica + - ...continues until all replicas for this master are returned. -@return +Each result includes all active replicas of the master instance +for the listed slot range. Failed replicas are not returned. -@array-reply: nested list of slot ranges with IP/Port mappings. +The third nested reply is guaranteed to be the networking information of the master instance for the slot range. +All networking information after the third nested reply are replicas of the master. -### Sample Output (old version) +If a cluster instance has non-contiguous slots (e.g. 1-400,900,1800-6000) then master and replica networking information results will be duplicated for each top-level slot range reply. -``` -127.0.0.1:7001> cluster slots -1) 1) (integer) 0 - 2) (integer) 4095 - 3) 1) "127.0.0.1" - 2) (integer) 7000 - 4) 1) "127.0.0.1" - 2) (integer) 7004 -2) 1) (integer) 12288 - 2) (integer) 16383 - 3) 1) "127.0.0.1" - 2) (integer) 7003 - 4) 1) "127.0.0.1" - 2) (integer) 7007 -3) 1) (integer) 4096 - 2) (integer) 8191 - 3) 1) "127.0.0.1" - 2) (integer) 7001 - 4) 1) "127.0.0.1" - 2) (integer) 7005 -4) 1) (integer) 8192 - 2) (integer) 12287 - 3) 1) "127.0.0.1" - 2) (integer) 7002 - 4) 1) "127.0.0.1" - 2) (integer) 7006 -``` +@return + +@array-reply: nested list of slot ranges with networking information. 
-### Sample Output (new version, includes IDs) +@examples ``` -127.0.0.1:30001> cluster slots +> CLUSTER SLOTS 1) 1) (integer) 0 2) (integer) 5460 3) 1) "127.0.0.1" 2) (integer) 30001 3) "09dbe9720cda62f7865eabc5fd8857c5d2678366" + 4) 1) hostname + 2) "host-1.redis.example.com" 4) 1) "127.0.0.1" 2) (integer) 30004 3) "821d8ca00d7ccf931ed3ffc7e3db0599d2271abf" + 4) 1) hostname + 2) "host-2.redis.example.com" 2) 1) (integer) 5461 2) (integer) 10922 3) 1) "127.0.0.1" 2) (integer) 30002 3) "c9d93d9f2c0c524ff34cc11838c2003d8c29e013" + 4) 1) hostname + 2) "host-3.redis.example.com" 4) 1) "127.0.0.1" 2) (integer) 30005 3) "faadb3eb99009de4ab72ad6b6ed87634c7ee410f" + 4) 1) hostname + 2) "host-4.redis.example.com" 3) 1) (integer) 10923 2) (integer) 16383 3) 1) "127.0.0.1" 2) (integer) 30003 3) "044ec91f325b7595e76dbcb18cc688b6a5b434a1" + 4) 1) hostname + 2) "host-5.redis.example.com" 4) 1) "127.0.0.1" 2) (integer) 30006 3) "58e6e48d41228013e5d9c1c37c5060693925e97e" + 4) 1) hostname + 2) "host-6.redis.example.com" ``` + +**Warning:** In future versions there could be more elements describing the node better. +In general a client implementation should just rely on the fact that certain parameters are at fixed positions as specified, but more parameters may follow and should be ignored. +Similarly a client library should try if possible to cope with the fact that older versions may just have the primary endpoint and port parameter. + +## Behavior change history + +* `>= 7.0.0`: Added support for hostnames and unknown endpoints in first field of node response.
\ No newline at end of file diff --git a/iredis/data/commands/cluster.md b/iredis/data/commands/cluster.md new file mode 100644 index 0000000..86d5c00 --- /dev/null +++ b/iredis/data/commands/cluster.md @@ -0,0 +1,3 @@ +This is a container command for Redis Cluster commands. + +To see the list of available commands you can call `CLUSTER HELP`. diff --git a/iredis/data/commands/command-docs.md b/iredis/data/commands/command-docs.md new file mode 100644 index 0000000..35ea017 --- /dev/null +++ b/iredis/data/commands/command-docs.md @@ -0,0 +1,55 @@ +Return documentary information about commands. + +By default, the reply includes all of the server's commands. +You can use the optional _command-name_ argument to specify the names of one or more commands. + +The reply includes a map for each returned command. +The following keys may be included in the mapped reply: + +* **summary:** short command description. +* **since:** the Redis version that added the command (or for module commands, the module version). +* **group:** the functional group to which the command belongs. + Possible values are: + - _bitmap_ + - _cluster_ + - _connection_ + - _generic_ + - _geo_ + - _hash_ + - _hyperloglog_ + - _list_ + - _module_ + - _pubsub_ + - _scripting_ + - _sentinel_ + - _server_ + - _set_ + - _sorted-set_ + - _stream_ + - _string_ + - _transactions_ +* **complexity:** a short explanation about the command's time complexity. +* **doc_flags:** an array of documentation flags. + Possible values are: + - _deprecated:_ the command is deprecated. + - _syscmd:_ a system command that isn't meant to be called by users. +* **deprecated_since:** the Redis version that deprecated the command (or for module commands, the module version).. +* **replaced_by:** the alternative for a deprecated command. +* **history:** an array of historical notes describing changes to the command's behavior or arguments. + Each entry is an array itself, made up of two elements: + 1. 
The Redis version that the entry applies to. + 2. The description of the change. +* **arguments:** an array of maps that describe the command's arguments. + Please refer to the [Redis command arguments][td] page for more information. + +[td]: /topics/command-arguments + +@return + +@array-reply: a map as a flattened array as described above. + +@examples + +```cli +COMMAND DOCS SET +``` diff --git a/iredis/data/commands/command-getkeys.md b/iredis/data/commands/command-getkeys.md index 1c591f1..6b8f300 100644 --- a/iredis/data/commands/command-getkeys.md +++ b/iredis/data/commands/command-getkeys.md @@ -1,12 +1,12 @@ Returns @array-reply of keys from a full Redis command. -`COMMAND GETKEYS` is a helper command to let you find the keys from a full Redis -command. +`COMMAND GETKEYS` is a helper command to let you find the keys +from a full Redis command. + +`COMMAND` provides information on how to find the key names of each command (see `firstkey`, [key specifications](/topics/key-specs#logical-operation-flags), and `movablekeys`), +but in some cases it's not possible to find keys of certain commands and then the entire command must be parsed to discover some / all key names. +You can use `COMMAND GETKEYS` or `COMMAND GETKEYSANDFLAGS` to discover key names directly from how Redis parses the commands. -`COMMAND` shows some commands as having movablekeys meaning the entire command -must be parsed to discover storage or retrieval keys. You can use -`COMMAND GETKEYS` to discover key positions directly from how Redis parses the -commands. @return diff --git a/iredis/data/commands/command-getkeysandflags.md b/iredis/data/commands/command-getkeysandflags.md new file mode 100644 index 0000000..3fa479d --- /dev/null +++ b/iredis/data/commands/command-getkeysandflags.md @@ -0,0 +1,22 @@ +Returns @array-reply of keys from a full Redis command and their usage flags. 
+ +`COMMAND GETKEYSANDFLAGS` is a helper command to let you find the keys from a full Redis command together with flags indicating what each key is used for. + +`COMMAND` provides information on how to find the key names of each command (see `firstkey`, [key specifications](/topics/key-specs#logical-operation-flags), and `movablekeys`), +but in some cases it's not possible to find keys of certain commands and then the entire command must be parsed to discover some / all key names. +You can use `COMMAND GETKEYS` or `COMMAND GETKEYSANDFLAGS` to discover key names directly from how Redis parses the commands. + +Refer to [key specifications](/topics/key-specs#logical-operation-flags) for information about the meaning of the key flags. + +@return + +@array-reply: list of keys from your command. +Each element of the array is an array containing key name in the first entry, and flags in the second. + +@examples + +```cli +COMMAND GETKEYS MSET a b c d e f +COMMAND GETKEYS EVAL "not consulted" 3 key1 key2 key3 arg1 arg2 arg3 argN +COMMAND GETKEYSANDFLAGS LMOVE mylist1 mylist2 left left +``` diff --git a/iredis/data/commands/command-help.md b/iredis/data/commands/command-help.md new file mode 100644 index 0000000..73d4cc4 --- /dev/null +++ b/iredis/data/commands/command-help.md @@ -0,0 +1,5 @@ +The `COMMAND HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/command-info.md b/iredis/data/commands/command-info.md index 92836e4..e16a555 100644 --- a/iredis/data/commands/command-info.md +++ b/iredis/data/commands/command-info.md @@ -1,10 +1,11 @@ Returns @array-reply of details about multiple Redis commands. -Same result format as `COMMAND` except you can specify which commands get -returned. +Same result format as `COMMAND` except you can specify which commands +get returned. 
+ +If you request details about non-existing commands, their return +position will be nil. -If you request details about non-existing commands, their return position will -be nil. @return diff --git a/iredis/data/commands/command-list.md b/iredis/data/commands/command-list.md new file mode 100644 index 0000000..5c0a4a7 --- /dev/null +++ b/iredis/data/commands/command-list.md @@ -0,0 +1,11 @@ +Return an array of the server's command names. + +You can use the optional _FILTERBY_ modifier to apply one of the following filters: + + - **MODULE module-name**: get the commands that belong to the module specified by _module-name_. + - **ACLCAT category**: get the commands in the [ACL category](/docs/manual/security/acl/#command-categories) specified by _category_. + - **PATTERN pattern**: get the commands that match the given glob-like _pattern_. + +@return + +@array-reply: a list of command names. diff --git a/iredis/data/commands/command.md b/iredis/data/commands/command.md index 2d2b1fc..37545f9 100644 --- a/iredis/data/commands/command.md +++ b/iredis/data/commands/command.md @@ -1,94 +1,91 @@ -Returns @array-reply of details about all Redis commands. +Return an array with details about every Redis command. -Cluster clients must be aware of key positions in commands so commands can go to -matching instances, but Redis commands vary between accepting one key, multiple -keys, or even multiple keys separated by other data. +The `COMMAND` command is introspective. +Its reply describes all commands that the server can process. +Redis clients can call it to obtain the server's runtime capabilities during the handshake. -You can use `COMMAND` to cache a mapping between commands and key positions for -each command to enable exact routing of commands to cluster instances. +`COMMAND` also has several subcommands. +Please refer to its subcommands for further details. -## Nested Result Array +**Cluster note:** +this command is especially beneficial for cluster-aware clients. 
+Such clients must identify the names of keys in commands to route requests to the correct shard. +Although most commands accept a single key as their first argument, there are many exceptions to this rule. +You can call `COMMAND` and then keep the mapping between commands and their respective key specification rules cached in the client. -Each top-level result contains six nested results. Each nested result is: +The reply it returns is an array with an element per command. +Each element that describes a Redis command is represented as an array by itself. -- command name -- command arity specification -- nested @array-reply of command flags -- position of first key in argument list -- position of last key in argument list -- step count for locating repeating keys +The command's array consists of a fixed number of elements. +The exact number of elements in the array depends on the server's version. -### Command Name +1. Name +1. Arity +1. Flags +1. First key +1. Last key +1. Step +1. [ACL categories][ta] (as of Redis 6.0) +1. [Tips][tb] (as of Redis 7.0) +1. [Key specifications][td] (as of Redis 7.0) +1. Subcommands (as of Redis 7.0) -Command name is the command returned as a lowercase string. +## Name -### Command Arity +This is the command's name in lowercase. -<table style="width:50%"> -<tr><td> -<pre> -<code>1) 1) "get" - 2) (integer) 2 - 3) 1) readonly - 4) (integer) 1 - 5) (integer) 1 - 6) (integer) 1 -</code> -</pre> -</td> -<td> -<pre> -<code>1) 1) "mget" - 2) (integer) -2 - 3) 1) readonly - 4) (integer) 1 - 5) (integer) -1 - 6) (integer) 1 -</code> -</pre> -</td></tr> -</table> +**Note:** +Redis command names are case-insensitive. -Command arity follows a simple pattern: +## Arity -- positive if command has fixed number of required arguments. -- negative if command has minimum number of required arguments, but may have - more. +Arity is the number of arguments a command expects. 
+It follows a simple pattern: -Command arity _includes_ counting the command name itself. +* A positive integer means a fixed number of arguments. +* A negative integer means a minimal number of arguments. + +Command arity _always includes_ the command's name itself (and the subcommand when applicable). Examples: -- `GET` arity is 2 since the command only accepts one argument and always has - the format `GET _key_`. -- `MGET` arity is -2 since the command accepts at a minimum one argument, but up - to an unlimited number: `MGET _key1_ [key2] [key3] ...`. - -Also note with `MGET`, the -1 value for "last key position" means the list of -keys may have unlimited length. - -### Flags - -Command flags is @array-reply containing one or more status replies: - -- _write_ - command may result in modifications -- _readonly_ - command will never modify keys -- _denyoom_ - reject command if currently out of memory -- _admin_ - server admin command -- _pubsub_ - pubsub-related command -- _noscript_ - deny this command from scripts -- _random_ - command has random results, dangerous for scripts -- _sort_for_script_ - if called from script, sort output -- _loading_ - allow command while database is loading -- _stale_ - allow command while replica has stale data -- _skip_monitor_ - do not show this command in MONITOR -- _asking_ - cluster related - accept even if importing -- _fast_ - command operates in constant or log(N) time. Used for latency - monitoring. -- _movablekeys_ - keys have no pre-determined position. You must discover keys - yourself. - -### Movable Keys +* `GET`'s arity is _2_ since the command only accepts one argument and always has the format `GET _key_`. +* `MGET`'s arity is _-2_ since the command accepts at least one argument, but possibly multiple ones: `MGET _key1_ [key2] [key3] ...`. + +## Flags + +Command flags are an array. It can contain the following simple strings (status reply): + +* **admin:** the command is an administrative command. 
+* **asking:** the command is allowed even during hash slot migration. + This flag is relevant in Redis Cluster deployments. +* **blocking:** the command may block the requesting client. +* **denyoom**: the command is rejected if the server's memory usage is too high (see the _maxmemory_ configuration directive). +* **fast:** the command operates in constant or log(N) time. + This flag is used for monitoring latency with the `LATENCY` command. +* **loading:** the command is allowed while the database is loading. +* **movablekeys:** the _first key_, _last key_, and _step_ values don't determine all key positions. + Clients need to use `COMMAND GETKEYS` or [key specifications][td] in this case. + See below for more details. +* **no_auth:** executing the command doesn't require authentication. +* **no_async_loading:** the command is denied during asynchronous loading (that is when a replica uses disk-less `SWAPDB SYNC`, and allows access to the old dataset). +* **no_mandatory_keys:** the command may accept key name arguments, but these aren't mandatory. +* **no_multi:** the command isn't allowed inside the context of a [transaction](/topics/transactions). +* **noscript:** the command can't be called from [scripts](/topics/eval-intro) or [functions](/topics/functions-intro). +* **pubsub:** the command is related to [Redis Pub/Sub](/topics/pubsub). +* **random**: the command returns random results, which is a concern with verbatim script replication. + As of Redis 7.0, this flag is a [command tip][tb]. +* **readonly:** the command doesn't modify data. +* **sort_for_script:** the command's output is sorted when called from a script. +* **skip_monitor:** the command is not shown in `MONITOR`'s output. +* **skip_slowlog:** the command is not shown in `SLOWLOG`'s output. + As of Redis 7.0, this flag is a [command tip][tb]. +* **stale:** the command is allowed while a replica has stale data. +* **write:** the command may modify data. 
+ +### Movablekeys + +Consider `SORT`: ``` 1) 1) "sort" @@ -99,85 +96,146 @@ Command flags is @array-reply containing one or more status replies: 4) (integer) 1 5) (integer) 1 6) (integer) 1 + ... ``` -Some Redis commands have no predetermined key locations. For those commands, -flag `movablekeys` is added to the command flags @array-reply. Your Redis -Cluster client needs to parse commands marked `movablekeys` to locate all -relevant key positions. +Some Redis commands have no predetermined key locations or are not easy to find. +For those commands, the _movablekeys_ flag indicates that the _first key_, _last key_, and _step_ values are insufficient to find all the keys. + +Here are several examples of commands that have the _movablekeys_ flag: + +* `SORT`: the optional _STORE_, _BY_, and _GET_ modifiers are followed by names of keys. +* `ZUNION`: the _numkeys_ argument specifies the number key name arguments. +* `MIGRATE`: the keys appear _KEYS_ keyword and only when the second argument is the empty string. + +Redis Cluster clients need to use other measures, as follows, to locate the keys for such commands. -Complete list of commands currently requiring key location parsing: +You can use the `COMMAND GETKEYS` command and have your Redis server report all keys of a given command's invocation. -- `SORT` - optional `STORE` key, optional `BY` weights, optional `GET` keys -- `ZUNION` - keys stop when `WEIGHT` or `AGGREGATE` starts -- `ZUNIONSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts -- `ZINTER` - keys stop when `WEIGHT` or `AGGREGATE` starts -- `ZINTERSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts -- `ZDIFF` - keys stop after `numkeys` count arguments -- `ZDIFFSTORE` - keys stop after `numkeys` count arguments -- `EVAL` - keys stop after `numkeys` count arguments -- `EVALSHA` - keys stop after `numkeys` count arguments +As of Redis 7.0, clients can use the [key specifications](#key-specifications) to identify the positions of key names. 
+The only commands that require using `COMMAND GETKEYS` are `SORT` and `MIGRATE` for clients that parse keys' specifications. -Also see `COMMAND GETKEYS` for getting your Redis server tell you where keys are -in any given command. +For more information, please refer to the [key specifications page][tr]. -### First Key in Argument List +## First key -For most commands the first key is position 1. Position 0 is always the command -name itself. +The position of the command's first key name argument. +For most commands, the first key's position is 1. +Position 0 is always the command name itself. -### Last Key in Argument List +## Last key -Redis commands usually accept one key, two keys, or an unlimited number of keys. +The position of the command's last key name argument. +Redis commands usually accept one, two or multiple number of keys. -If a command accepts one key, the first key and last key positions is 1. +Commands that accept a single key have both _first key_ and _last key_ set to 1. -If a command accepts two keys (e.g. `BRPOPLPUSH`, `SMOVE`, `RENAME`, ...) then -the last key position is the location of the last key in the argument list. +Commands that accept two key name arguments, e.g. `BRPOPLPUSH`, `SMOVE` and `RENAME`, have this value set to the position of their second key. -If a command accepts an unlimited number of keys, the last key position is -1. +Multi-key commands that accept an arbitrary number of keys, such as `MSET`, use the value -1. -### Step Count +## Step -<table style="width:50%"> -<tr><td> -<pre> -<code>1) 1) "mset" +The step, or increment, between the _first key_ and the position of the next key. + +Consider the following two examples: + +``` +1) 1) "mset" 2) (integer) -3 3) 1) write 2) denyoom 4) (integer) 1 5) (integer) -1 6) (integer) 2 -</code> -</pre> -</td> -<td> -<pre> -<code>1) 1) "mget" + ... 
+``` + +``` +1) 1) "mget" 2) (integer) -2 3) 1) readonly + 2) fast 4) (integer) 1 5) (integer) -1 6) (integer) 1 -</code> -</pre> -</td></tr> -</table> + ... +``` + +The step count allows us to find keys' positions. +For example `MSET`: Its syntax is `MSET _key1_ _val1_ [key2] [val2] [key3] [val3]...`, so the keys are at every other position (step value of _2_). +Unlike `MGET`, which uses a step value of _1_. + +## ACL categories + +This is an array of simple strings that are the ACL categories to which the command belongs. +Please refer to the [Access Control List][ta] page for more information. + +## Command tips + +Helpful information about the command. +To be used by clients/proxies. -Key step count allows us to find key positions in commands like `MSET` where the -format is `MSET _key1_ _val1_ [key2] [val2] [key3] [val3]...`. +Please check the [Command tips][tb] page for more information. -In the case of `MSET`, keys are every other position so the step value is 2. -Compare with `MGET` above where the step value is just 1. +## Key specifications + +This is an array consisting of the command's key specifications. +Each element in the array is a map describing a method for locating keys in the command's arguments. + +For more information please check the [key specifications page][td]. + +## Subcommands + +This is an array containing all of the command's subcommands, if any. +Some Redis commands have subcommands (e.g., the `REWRITE` subcommand of `CONFIG`). +Each element in the array represents one subcommand and follows the same specifications as those of `COMMAND`'s reply. + +[ta]: /topics/acl +[tb]: /topics/command-tips +[td]: /topics/key-specs +[tr]: /topics/key-specs @return -@array-reply: nested list of command details. Commands are returned in random -order. +@array-reply: a nested list of command details. + +The order of commands in the array is random. 
@examples -```cli -COMMAND +The following is `COMMAND`'s output for the `GET` command: + +``` +1) 1) "get" + 2) (integer) 2 + 3) 1) readonly + 2) fast + 4) (integer) 1 + 5) (integer) 1 + 6) (integer) 1 + 7) 1) @read + 2) @string + 3) @fast + 8) (empty array) + 9) 1) 1) "flags" + 2) 1) read + 3) "begin_search" + 4) 1) "type" + 2) "index" + 3) "spec" + 4) 1) "index" + 2) (integer) 1 + 5) "find_keys" + 6) 1) "type" + 2) "range" + 3) "spec" + 4) 1) "lastkey" + 2) (integer) 0 + 3) "keystep" + 4) (integer) 1 + 5) "limit" + 6) (integer) 0 + 10) (empty array) +... ``` diff --git a/iredis/data/commands/config-get.md b/iredis/data/commands/config-get.md index f4a4b34..d2e85a3 100644 --- a/iredis/data/commands/config-get.md +++ b/iredis/data/commands/config-get.md @@ -1,51 +1,44 @@ The `CONFIG GET` command is used to read the configuration parameters of a -running Redis server. Not all the configuration parameters are supported in -Redis 2.4, while Redis 2.6 can read the whole configuration of a server using -this command. +running Redis server. +Not all the configuration parameters are supported in Redis 2.4, while Redis 2.6 +can read the whole configuration of a server using this command. -The symmetric command used to alter the configuration at run time is -`CONFIG SET`. +The symmetric command used to alter the configuration at run time is `CONFIG +SET`. -`CONFIG GET` takes a single argument, which is a glob-style pattern. All the -configuration parameters matching this parameter are reported as a list of -key-value pairs. Example: +`CONFIG GET` takes multiple arguments, which are glob-style patterns. +Any configuration parameter matching any of the patterns are reported as a list +of key-value pairs. 
+Example: ``` -redis> config get *max-*-entries* -1) "hash-max-zipmap-entries" -2) "512" -3) "list-max-ziplist-entries" -4) "512" -5) "set-max-intset-entries" -6) "512" +redis> config get *max-*-entries* maxmemory + 1) "maxmemory" + 2) "0" + 3) "hash-max-listpack-entries" + 4) "512" + 5) "hash-max-ziplist-entries" + 6) "512" + 7) "set-max-intset-entries" + 8) "512" + 9) "zset-max-listpack-entries" +10) "128" +11) "zset-max-ziplist-entries" +12) "128" ``` You can obtain a list of all the supported configuration parameters by typing `CONFIG GET *` in an open `redis-cli` prompt. All the supported parameters have the same meaning of the equivalent -configuration parameter used in the [redis.conf][hgcarr22rc] file, with the -following important differences: +configuration parameter used in the [redis.conf][hgcarr22rc] file: -[hgcarr22rc]: http://github.com/redis/redis/raw/2.8/redis.conf +[hgcarr22rc]: http://github.com/redis/redis/raw/unstable/redis.conf -- Where bytes or other quantities are specified, it is not possible to use the - `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth), everything - should be specified as a well-formed 64-bit integer, in the base unit of the - configuration directive. -- The save parameter is a single string of space-separated integers. Every pair - of integers represent a seconds/modifications threshold. +Note that you should look at the redis.conf file relevant to the version you're +working with as configuration options might change between versions. The link +above is to the latest development version. -For instance what in `redis.conf` looks like: - -``` -save 900 1 -save 300 10 -``` - -that means, save after 900 seconds if there is at least 1 change to the dataset, -and after 300 seconds if there are at least 10 changes to the dataset, will be -reported by `CONFIG GET` as "900 1 300 10". 
@return diff --git a/iredis/data/commands/config-help.md b/iredis/data/commands/config-help.md new file mode 100644 index 0000000..5f8bc48 --- /dev/null +++ b/iredis/data/commands/config-help.md @@ -0,0 +1,5 @@ +The `CONFIG HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/config-resetstat.md b/iredis/data/commands/config-resetstat.md index d25a25b..cb0232b 100644 --- a/iredis/data/commands/config-resetstat.md +++ b/iredis/data/commands/config-resetstat.md @@ -2,14 +2,14 @@ Resets the statistics reported by Redis using the `INFO` command. These are the counters that are reset: -- Keyspace hits -- Keyspace misses -- Number of commands processed -- Number of connections received -- Number of expired keys -- Number of rejected connections -- Latest fork(2) time -- The `aof_delayed_fsync` counter +* Keyspace hits +* Keyspace misses +* Number of commands processed +* Number of connections received +* Number of expired keys +* Number of rejected connections +* Latest fork(2) time +* The `aof_delayed_fsync` counter @return diff --git a/iredis/data/commands/config-rewrite.md b/iredis/data/commands/config-rewrite.md index 54509e5..c103156 100644 --- a/iredis/data/commands/config-rewrite.md +++ b/iredis/data/commands/config-rewrite.md @@ -1,35 +1,18 @@ -The `CONFIG REWRITE` command rewrites the `redis.conf` file the server was -started with, applying the minimal changes needed to make it reflect the -configuration currently used by the server, which may be different compared to -the original one because of the use of the `CONFIG SET` command. +The `CONFIG REWRITE` command rewrites the `redis.conf` file the server was started with, applying the minimal changes needed to make it reflect the configuration currently used by the server, which may be different compared to the original one because of the use of the `CONFIG SET` command. 
The rewrite is performed in a very conservative way: -- Comments and the overall structure of the original redis.conf are preserved as - much as possible. -- If an option already exists in the old redis.conf file, it will be rewritten - at the same position (line number). -- If an option was not already present, but it is set to its default value, it - is not added by the rewrite process. -- If an option was not already present, but it is set to a non-default value, it - is appended at the end of the file. -- Non used lines are blanked. For instance if you used to have multiple `save` - directives, but the current configuration has fewer or none as you disabled - RDB persistence, all the lines will be blanked. +* Comments and the overall structure of the original redis.conf are preserved as much as possible. +* If an option already exists in the old redis.conf file, it will be rewritten at the same position (line number). +* If an option was not already present, but it is set to its default value, it is not added by the rewrite process. +* If an option was not already present, but it is set to a non-default value, it is appended at the end of the file. +* Non used lines are blanked. For instance if you used to have multiple `save` directives, but the current configuration has fewer or none as you disabled RDB persistence, all the lines will be blanked. -CONFIG REWRITE is also able to rewrite the configuration file from scratch if -the original one no longer exists for some reason. However if the server was -started without a configuration file at all, the CONFIG REWRITE will just return -an error. +CONFIG REWRITE is also able to rewrite the configuration file from scratch if the original one no longer exists for some reason. However if the server was started without a configuration file at all, the CONFIG REWRITE will just return an error. 
## Atomic rewrite process -In order to make sure the redis.conf file is always consistent, that is, on -errors or crashes you always end with the old file, or the new one, the rewrite -is performed with a single `write(2)` call that has enough content to be at -least as big as the old file. Sometimes additional padding in the form of -comments is added in order to make sure the resulting file is big enough, and -later the file gets truncated to remove the padding at the end. +In order to make sure the redis.conf file is always consistent, that is, on errors or crashes you always end with the old file, or the new one, the rewrite is performed with a single `write(2)` call that has enough content to be at least as big as the old file. Sometimes additional padding in the form of comments is added in order to make sure the resulting file is big enough, and later the file gets truncated to remove the padding at the end. @return diff --git a/iredis/data/commands/config-set.md b/iredis/data/commands/config-set.md index dba09c2..4b0841e 100644 --- a/iredis/data/commands/config-set.md +++ b/iredis/data/commands/config-set.md @@ -1,6 +1,7 @@ The `CONFIG SET` command is used in order to reconfigure the server at run time -without the need to restart Redis. You can change both trivial parameters or -switch from one to another persistence option using this command. +without the need to restart Redis. +You can change both trivial parameters or switch from one to another persistence +option using this command. The list of configuration parameters supported by `CONFIG SET` can be obtained issuing a `CONFIG GET *` command, that is the symmetrical command used to obtain @@ -10,34 +11,18 @@ All the configuration parameters set using `CONFIG SET` are immediately loaded by Redis and will take effect starting with the next command executed. 
All the supported parameters have the same meaning of the equivalent -configuration parameter used in the [redis.conf][hgcarr22rc] file, with the -following important differences: +configuration parameter used in the [redis.conf][hgcarr22rc] file. -[hgcarr22rc]: http://github.com/redis/redis/raw/6.0/redis.conf +[hgcarr22rc]: http://github.com/redis/redis/raw/unstable/redis.conf -- In options where bytes or other quantities are specified, it is not possible - to use the `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth), - everything should be specified as a well-formed 64-bit integer, in the base - unit of the configuration directive. However since Redis version 3.0 or - greater, it is possible to use `CONFIG SET` with memory units for `maxmemory`, - client output buffers, and replication backlog size. -- The save parameter is a single string of space-separated integers. Every pair - of integers represent a seconds/modifications threshold. - -For instance what in `redis.conf` looks like: - -``` -save 900 1 -save 300 10 -``` - -that means, save after 900 seconds if there is at least 1 change to the dataset, -and after 300 seconds if there are at least 10 changes to the dataset, should be -set using `CONFIG SET SAVE "900 1 300 10"`. +Note that you should look at the redis.conf file relevant to the version you're +working with as configuration options might change between versions. The link +above is to the latest development version. It is possible to switch persistence from RDB snapshotting to append-only file -(and the other way around) using the `CONFIG SET` command. For more information -about how to do that please check the [persistence page][tp]. +(and the other way around) using the `CONFIG SET` command. +For more information about how to do that please check the [persistence +page][tp]. [tp]: /topics/persistence @@ -52,5 +37,5 @@ options are not mutually exclusive. @return -@simple-string-reply: `OK` when the configuration was set properly. 
Otherwise an -error is returned. +@simple-string-reply: `OK` when the configuration was set properly. +Otherwise an error is returned. diff --git a/iredis/data/commands/config.md b/iredis/data/commands/config.md new file mode 100644 index 0000000..d4b37e9 --- /dev/null +++ b/iredis/data/commands/config.md @@ -0,0 +1,3 @@ +This is a container command for runtime configuration commands. + +To see the list of available commands you can call `CONFIG HELP`. diff --git a/iredis/data/commands/copy.md b/iredis/data/commands/copy.md index f9ff5b9..2803d2a 100644 --- a/iredis/data/commands/copy.md +++ b/iredis/data/commands/copy.md @@ -12,8 +12,8 @@ The command returns an error when the `destination` key already exists. The @integer-reply, specifically: -- `1` if `source` was copied. -- `0` if `source` was not copied. +* `1` if `source` was copied. +* `0` if `source` was not copied. @examples @@ -21,4 +21,4 @@ The command returns an error when the `destination` key already exists. The SET dolly "sheep" COPY dolly clone GET clone -``` +```
\ No newline at end of file diff --git a/iredis/data/commands/debug.md b/iredis/data/commands/debug.md new file mode 100644 index 0000000..fc3c3f3 --- /dev/null +++ b/iredis/data/commands/debug.md @@ -0,0 +1,2 @@ +The `DEBUG` command is an internal command. +It is meant to be used for developing and testing Redis.
\ No newline at end of file diff --git a/iredis/data/commands/decr.md b/iredis/data/commands/decr.md index 78d2a2c..cda121a 100644 --- a/iredis/data/commands/decr.md +++ b/iredis/data/commands/decr.md @@ -1,7 +1,8 @@ -Decrements the number stored at `key` by one. If the key does not exist, it is -set to `0` before performing the operation. An error is returned if the key -contains a value of the wrong type or contains a string that can not be -represented as integer. This operation is limited to **64 bit signed integers**. +Decrements the number stored at `key` by one. +If the key does not exist, it is set to `0` before performing the operation. +An error is returned if the key contains a value of the wrong type or contains a +string that can not be represented as integer. +This operation is limited to **64 bit signed integers**. See `INCR` for extra information on increment/decrement operations. diff --git a/iredis/data/commands/decrby.md b/iredis/data/commands/decrby.md index 3d000a0..b2e823b 100644 --- a/iredis/data/commands/decrby.md +++ b/iredis/data/commands/decrby.md @@ -1,7 +1,8 @@ -Decrements the number stored at `key` by `decrement`. If the key does not exist, -it is set to `0` before performing the operation. An error is returned if the -key contains a value of the wrong type or contains a string that can not be -represented as integer. This operation is limited to 64 bit signed integers. +Decrements the number stored at `key` by `decrement`. +If the key does not exist, it is set to `0` before performing the operation. +An error is returned if the key contains a value of the wrong type or contains a +string that can not be represented as integer. +This operation is limited to 64 bit signed integers. See `INCR` for extra information on increment/decrement operations. 
diff --git a/iredis/data/commands/del.md b/iredis/data/commands/del.md index fbb05ec..d5fcbac 100644 --- a/iredis/data/commands/del.md +++ b/iredis/data/commands/del.md @@ -1,4 +1,5 @@ -Removes the specified keys. A key is ignored if it does not exist. +Removes the specified keys. +A key is ignored if it does not exist. @return diff --git a/iredis/data/commands/dump.md b/iredis/data/commands/dump.md index b81d7af..d740033 100644 --- a/iredis/data/commands/dump.md +++ b/iredis/data/commands/dump.md @@ -1,20 +1,23 @@ Serialize the value stored at key in a Redis-specific format and return it to -the user. The returned value can be synthesized back into a Redis key using the -`RESTORE` command. +the user. +The returned value can be synthesized back into a Redis key using the `RESTORE` +command. The serialization format is opaque and non-standard, however it has a few semantic characteristics: -- It contains a 64-bit checksum that is used to make sure errors will be - detected. The `RESTORE` command makes sure to check the checksum before - synthesizing a key using the serialized value. -- Values are encoded in the same format used by RDB. -- An RDB version is encoded inside the serialized value, so that different Redis +* It contains a 64-bit checksum that is used to make sure errors will be + detected. + The `RESTORE` command makes sure to check the checksum before synthesizing a + key using the serialized value. +* Values are encoded in the same format used by RDB. +* An RDB version is encoded inside the serialized value, so that different Redis versions with incompatible RDB formats will refuse to process the serialized value. -The serialized value does NOT contain expire information. In order to capture -the time to live of the current value the `PTTL` command should be used. +The serialized value does NOT contain expire information. +In order to capture the time to live of the current value the `PTTL` command +should be used. 
If `key` does not exist a nil bulk reply is returned. diff --git a/iredis/data/commands/eval.md b/iredis/data/commands/eval.md index f85d727..079edeb 100644 --- a/iredis/data/commands/eval.md +++ b/iredis/data/commands/eval.md @@ -1,913 +1,24 @@ -## Introduction to EVAL +Invoke the execution of a server-side Lua script. -`EVAL` and `EVALSHA` are used to evaluate scripts using the Lua interpreter -built into Redis starting from version 2.6.0. +The first argument is the script's source code. +Scripts are written in [Lua](https://lua.org) and executed by the embedded [Lua 5.1](/topics/lua-api) interpreter in Redis. -The first argument of `EVAL` is a Lua 5.1 script. The script does not need to -define a Lua function (and should not). It is just a Lua program that will run -in the context of the Redis server. +The second argument is the number of input key name arguments, followed by all the keys accessed by the script. +These names of input keys are available to the script as the [_KEYS_ global runtime variable](/topics/lua-api#the-keys-global-variable) +Any additional input arguments **should not** represent names of keys. -The second argument of `EVAL` is the number of arguments that follows the script -(starting from the third argument) that represent Redis key names. The arguments -can be accessed by Lua using the `!KEYS` global variable in the form of a -one-based array (so `KEYS[1]`, `KEYS[2]`, ...). +**Important:** +to ensure the correct execution of scripts, both in standalone and clustered deployments, all names of keys that a script accesses must be explicitly provided as input key arguments. +The script **should only** access keys whose names are given as input arguments. +Scripts **should never** access keys with programmatically-generated names or based on the contents of data structures stored in the database. 
-All the additional arguments should not represent key names and can be accessed -by Lua using the `ARGV` global variable, very similarly to what happens with -keys (so `ARGV[1]`, `ARGV[2]`, ...). +Please refer to the [Redis Programmability](/topics/programmability) and [Introduction to Eval Scripts](/topics/eval-intro) for more information about Lua scripts. -The following example should clarify what stated above: +@examples -``` -> eval "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}" 2 key1 key2 first second -1) "key1" -2) "key2" -3) "first" -4) "second" -``` - -Note: as you can see Lua arrays are returned as Redis multi bulk replies, that -is a Redis return type that your client library will likely convert into an -Array type in your programming language. - -It is possible to call Redis commands from a Lua script using two different Lua -functions: - -- `redis.call()` -- `redis.pcall()` - -`redis.call()` is similar to `redis.pcall()`, the only difference is that if a -Redis command call will result in an error, `redis.call()` will raise a Lua -error that in turn will force `EVAL` to return an error to the command caller, -while `redis.pcall` will trap the error and return a Lua table representing the -error. - -The arguments of the `redis.call()` and `redis.pcall()` functions are all the -arguments of a well formed Redis command: - -``` -> eval "return redis.call('set','foo','bar')" 0 -OK -``` - -The above script sets the key `foo` to the string `bar`. However it violates the -`EVAL` command semantics as all the keys that the script uses should be passed -using the `!KEYS` array: - -``` -> eval "return redis.call('set',KEYS[1],'bar')" 1 foo -OK -``` - -All Redis commands must be analyzed before execution to determine which keys the -command will operate on. In order for this to be true for `EVAL`, keys must be -passed explicitly. This is useful in many ways, but especially to make sure -Redis Cluster can forward your request to the appropriate cluster node. 
- -Note this rule is not enforced in order to provide the user with opportunities -to abuse the Redis single instance configuration, at the cost of writing scripts -not compatible with Redis Cluster. - -Lua scripts can return a value that is converted from the Lua type to the Redis -protocol using a set of conversion rules. - -## Conversion between Lua and Redis data types - -Redis return values are converted into Lua data types when Lua calls a Redis -command using `call()` or `pcall()`. Similarly, Lua data types are converted -into the Redis protocol when calling a Redis command and when a Lua script -returns a value, so that scripts can control what `EVAL` will return to the -client. - -This conversion between data types is designed in a way that if a Redis type is -converted into a Lua type, and then the result is converted back into a Redis -type, the result is the same as the initial value. - -In other words there is a one-to-one conversion between Lua and Redis types. The -following table shows you all the conversions rules: - -**Redis to Lua** conversion table. - -- Redis integer reply -> Lua number -- Redis bulk reply -> Lua string -- Redis multi bulk reply -> Lua table (may have other Redis data types nested) -- Redis status reply -> Lua table with a single `ok` field containing the status -- Redis error reply -> Lua table with a single `err` field containing the error -- Redis Nil bulk reply and Nil multi bulk reply -> Lua false boolean type - -**Lua to Redis** conversion table. - -- Lua number -> Redis integer reply (the number is converted into an integer) -- Lua string -> Redis bulk reply -- Lua table (array) -> Redis multi bulk reply (truncated to the first nil inside - the Lua array if any) -- Lua table with a single `ok` field -> Redis status reply -- Lua table with a single `err` field -> Redis error reply -- Lua boolean false -> Redis Nil bulk reply. 
- -There is an additional Lua-to-Redis conversion rule that has no corresponding -Redis to Lua conversion rule: - -- Lua boolean true -> Redis integer reply with value of 1. - -Lastly, there are three important rules to note: - -- Lua has a single numerical type, Lua numbers. There is no distinction between - integers and floats. So we always convert Lua numbers into integer replies, - removing the decimal part of the number if any. **If you want to return a - float from Lua you should return it as a string**, exactly like Redis itself - does (see for instance the `ZSCORE` command). -- There is - [no simple way to have nils inside Lua arrays](http://www.lua.org/pil/19.1.html), - this is a result of Lua table semantics, so when Redis converts a Lua array - into Redis protocol the conversion is stopped if a nil is encountered. -- When a Lua table contains keys (and their values), the converted Redis reply - will **not** include them. - -**RESP3 mode conversion rules**: note that the Lua engine can work in RESP3 mode -using the new Redis 6 protocol. In this case there are additional conversion -rules, and certain conversions are also modified compared to the RESP2 mode. -Please refer to the RESP3 section of this document for more information. +The following example will run a script that returns the first argument that it gets. -Here are a few conversion examples: - -``` -> eval "return 10" 0 -(integer) 10 - -> eval "return {1,2,{3,'Hello World!'}}" 0 -1) (integer) 1 -2) (integer) 2 -3) 1) (integer) 3 - 2) "Hello World!" - -> eval "return redis.call('get','foo')" 0 -"bar" -``` - -The last example shows how it is possible to receive the exact return value of -`redis.call()` or `redis.pcall()` from Lua that would be returned if the command -was called directly. 
- -In the following example we can see how floats and arrays containing nils and -keys are handled: - -``` -> eval "return {1,2,3.3333,somekey='somevalue','foo',nil,'bar'}" 0 -1) (integer) 1 -2) (integer) 2 -3) (integer) 3 -4) "foo" -``` - -As you can see 3.333 is converted into 3, _somekey_ is excluded, and the _bar_ -string is never returned as there is a nil before. - -## Helper functions to return Redis types - -There are two helper functions to return Redis types from Lua. - -- `redis.error_reply(error_string)` returns an error reply. This function simply - returns a single field table with the `err` field set to the specified string - for you. -- `redis.status_reply(status_string)` returns a status reply. This function - simply returns a single field table with the `ok` field set to the specified - string for you. - -There is no difference between using the helper functions or directly returning -the table with the specified format, so the following two forms are equivalent: - - return {err="My Error"} - return redis.error_reply("My Error") - -## Atomicity of scripts - -Redis uses the same Lua interpreter to run all the commands. Also Redis -guarantees that a script is executed in an atomic way: no other script or Redis -command will be executed while a script is being executed. This semantic is -similar to the one of `MULTI` / `EXEC`. From the point of view of all the other -clients the effects of a script are either still not visible or already -completed. - -However this also means that executing slow scripts is not a good idea. It is -not hard to create fast scripts, as the script overhead is very low, but if you -are going to use slow scripts you should be aware that while the script is -running no other client can execute commands. 
- -## Error handling - -As already stated, calls to `redis.call()` resulting in a Redis command error -will stop the execution of the script and return an error, in a way that makes -it obvious that the error was generated by a script: - -``` -> del foo -(integer) 1 -> lpush foo a -(integer) 1 -> eval "return redis.call('get','foo')" 0 -(error) ERR Error running script (call to f_6b1bf486c81ceb7edf3c093f4c48582e38c0e791): ERR Operation against a key holding the wrong kind of value ``` - -Using `redis.pcall()` no error is raised, but an error object is returned in the -format specified above (as a Lua table with an `err` field). The script can pass -the exact error to the user by returning the error object returned by -`redis.pcall()`. - -## Running Lua under low memory conditions - -When the memory usage in Redis exceeds the `maxmemory` limit, the first write -command encountered in the Lua script that uses additional memory will cause the -script to abort (unless `redis.pcall` was used). However, one thing to caution -here is that if the first write command does not use additional memory such as -DEL, LREM, or SREM, etc, Redis will allow it to run and all subsequent commands -in the Lua script will execute to completion for atomicity. If the subsequent -writes in the script generate additional memory, the Redis memory usage can go -over `maxmemory`. - -Another possible way for Lua script to cause Redis memory usage to go above -`maxmemory` happens when the script execution starts when Redis is slightly -below `maxmemory` so the first write command in the script is allowed. As the -script executes, subsequent write commands continue to generate memory and -causes the Redis server to go above `maxmemory`. - -In those scenarios, it is recommended to configure the `maxmemory-policy` not to -use `noeviction`. Also Lua scripts should be short so that evictions of items -can happen in between Lua scripts. 
- -## Bandwidth and EVALSHA - -The `EVAL` command forces you to send the script body again and again. Redis -does not need to recompile the script every time as it uses an internal caching -mechanism, however paying the cost of the additional bandwidth may not be -optimal in many contexts. - -On the other hand, defining commands using a special command or via `redis.conf` -would be a problem for a few reasons: - -- Different instances may have different implementations of a command. - -- Deployment is hard if we have to make sure all instances contain a given - command, especially in a distributed environment. - -- Reading application code, the complete semantics might not be clear since the - application calls commands defined server side. - -In order to avoid these problems while avoiding the bandwidth penalty, Redis -implements the `EVALSHA` command. - -`EVALSHA` works exactly like `EVAL`, but instead of having a script as the first -argument it has the SHA1 digest of a script. The behavior is the following: - -- If the server still remembers a script with a matching SHA1 digest, the script - is executed. - -- If the server does not remember a script with this SHA1 digest, a special - error is returned telling the client to use `EVAL` instead. - -Example: - -``` -> set foo bar -OK -> eval "return redis.call('get','foo')" 0 -"bar" -> evalsha 6b1bf486c81ceb7edf3c093f4c48582e38c0e791 0 -"bar" -> evalsha ffffffffffffffffffffffffffffffffffffffff 0 -(error) `NOSCRIPT` No matching script. Please use `EVAL`. -``` - -The client library implementation can always optimistically send `EVALSHA` under -the hood even when the client actually calls `EVAL`, in the hope the script was -already seen by the server. If the `NOSCRIPT` error is returned `EVAL` will be -used instead. - -Passing keys and arguments as additional `EVAL` arguments is also very useful in -this context as the script string remains constant and can be efficiently cached -by Redis. 
- -## Script cache semantics - -Executed scripts are guaranteed to be in the script cache of a given execution -of a Redis instance forever. This means that if an `EVAL` is performed against a -Redis instance all the subsequent `EVALSHA` calls will succeed. - -The reason why scripts can be cached for long time is that it is unlikely for a -well written application to have enough different scripts to cause memory -problems. Every script is conceptually like the implementation of a new command, -and even a large application will likely have just a few hundred of them. Even -if the application is modified many times and scripts will change, the memory -used is negligible. - -The only way to flush the script cache is by explicitly calling the -`SCRIPT FLUSH` command, which will _completely flush_ the scripts cache removing -all the scripts executed so far. - -This is usually needed only when the instance is going to be instantiated for -another customer or application in a cloud environment. - -Also, as already mentioned, restarting a Redis instance flushes the script -cache, which is not persistent. However from the point of view of the client -there are only two ways to make sure a Redis instance was not restarted between -two different commands. - -- The connection we have with the server is persistent and was never closed so - far. -- The client explicitly checks the `runid` field in the `INFO` command in order - to make sure the server was not restarted and is still the same process. - -Practically speaking, for the client it is much better to simply assume that in -the context of a given connection, cached scripts are guaranteed to be there -unless an administrator explicitly called the `SCRIPT FLUSH` command. - -The fact that the user can count on Redis not removing scripts is semantically -useful in the context of pipelining. 
- -For instance an application with a persistent connection to Redis can be sure -that if a script was sent once it is still in memory, so EVALSHA can be used -against those scripts in a pipeline without the chance of an error being -generated due to an unknown script (we'll see this problem in detail later). - -A common pattern is to call `SCRIPT LOAD` to load all the scripts that will -appear in a pipeline, then use `EVALSHA` directly inside the pipeline without -any need to check for errors resulting from the script hash not being -recognized. - -## The SCRIPT command - -Redis offers a SCRIPT command that can be used in order to control the scripting -subsystem. SCRIPT currently accepts three different commands: - -- `SCRIPT FLUSH` - - This command is the only way to force Redis to flush the scripts cache. It is - most useful in a cloud environment where the same instance can be reassigned - to a different user. It is also useful for testing client libraries' - implementations of the scripting feature. - -- `SCRIPT EXISTS sha1 sha2 ... shaN` - - Given a list of SHA1 digests as arguments this command returns an array of 1 - or 0, where 1 means the specific SHA1 is recognized as a script already - present in the scripting cache, while 0 means that a script with this SHA1 was - never seen before (or at least never seen after the latest SCRIPT FLUSH - command). - -- `SCRIPT LOAD script` - - This command registers the specified script in the Redis script cache. The - command is useful in all the contexts where we want to make sure that - `EVALSHA` will not fail (for instance during a pipeline or MULTI/EXEC - operation), without the need to actually execute the script. - -- `SCRIPT KILL` - - This command is the only way to interrupt a long-running script that reaches - the configured maximum execution time for scripts. 
The SCRIPT KILL command can - only be used with scripts that did not modify the dataset during their - execution (since stopping a read-only script does not violate the scripting - engine's guaranteed atomicity). See the next sections for more information - about long running scripts. - -## Scripts as pure functions - -_Note: starting with Redis 5, scripts are always replicated as effects and not -sending the script verbatim. So the following section is mostly applicable to -Redis version 4 or older._ - -A very important part of scripting is writing scripts that are pure functions. -Scripts executed in a Redis instance are, by default, propagated to replicas and -to the AOF file by sending the script itself -- not the resulting commands. - -The reason is that sending a script to another Redis instance is often much -faster than sending the multiple commands the script generates, so if the client -is sending many scripts to the master, converting the scripts into individual -commands for the replica / AOF would result in too much bandwidth for the -replication link or the Append Only File (and also too much CPU since -dispatching a command received via network is a lot more work for Redis compared -to dispatching a command invoked by Lua scripts). - -Normally replicating scripts instead of the effects of the scripts makes sense, -however not in all the cases. So starting with Redis 3.2, the scripting engine -is able to, alternatively, replicate the sequence of write commands resulting -from the script execution, instead of replication the script itself. See the -next section for more information. In this section we'll assume that scripts are -replicated by sending the whole script. Let's call this replication mode **whole -scripts replication**. 
- -The main drawback with the _whole scripts replication_ approach is that scripts -are required to have the following property: - -- The script must always evaluates the same Redis _write_ commands with the same - arguments given the same input data set. Operations performed by the script - cannot depend on any hidden (non-explicit) information or state that may - change as script execution proceeds or between different executions of the - script, nor can it depend on any external input from I/O devices. - -Things like using the system time, calling Redis random commands like -`RANDOMKEY`, or using Lua random number generator, could result into scripts -that will not always evaluate in the same way. - -In order to enforce this behavior in scripts Redis does the following: - -- Lua does not export commands to access the system time or other external - state. -- Redis will block the script with an error if a script calls a Redis command - able to alter the data set **after** a Redis _random_ command like - `RANDOMKEY`, `SRANDMEMBER`, `TIME`. This means that if a script is read-only - and does not modify the data set it is free to call those commands. Note that - a _random command_ does not necessarily mean a command that uses random - numbers: any non-deterministic command is considered a random command (the - best example in this regard is the `TIME` command). -- In Redis version 4, commands that may return elements in random order, like - `SMEMBERS` (because Redis Sets are _unordered_) have a different behavior when - called from Lua, and undergo a silent lexicographical sorting filter before - returning data to Lua scripts. So `redis.call("smembers",KEYS[1])` will always - return the Set elements in the same order, while the same command invoked from - normal clients may return different results even if the key contains exactly - the same elements. 
However starting with Redis 5 there is no longer such - ordering step, because Redis 5 replicates scripts in a way that no longer - needs non-deterministic commands to be converted into deterministic ones. In - general, even when developing for Redis 4, never assume that certain commands - in Lua will be ordered, but instead rely on the documentation of the original - command you call to see the properties it provides. -- Lua pseudo random number generation functions `math.random` and - `math.randomseed` are modified in order to always have the same seed every - time a new script is executed. This means that calling `math.random` will - always generate the same sequence of numbers every time a script is executed - if `math.randomseed` is not used. - -However the user is still able to write commands with random behavior using the -following simple trick. Imagine I want to write a Redis script that will -populate a list with N random integers. - -I can start with this small Ruby program: - -``` -require 'rubygems' -require 'redis' - -r = Redis.new - -RandomPushScript = <<EOF - local i = tonumber(ARGV[1]) - local res - while (i > 0) do - res = redis.call('lpush',KEYS[1],math.random()) - i = i-1 - end - return res -EOF - -r.del(:mylist) -puts r.eval(RandomPushScript,[:mylist],[10,rand(2**32)]) -``` - -Every time this script executed the resulting list will have exactly the -following elements: - -``` -> lrange mylist 0 -1 - 1) "0.74509509873814" - 2) "0.87390407681181" - 3) "0.36876626981831" - 4) "0.6921941534114" - 5) "0.7857992587545" - 6) "0.57730350670279" - 7) "0.87046522734243" - 8) "0.09637165539729" - 9) "0.74990198051087" -10) "0.17082803611217" -``` - -In order to make it a pure function, but still be sure that every invocation of -the script will result in different random elements, we can simply add an -additional argument to the script that will be used in order to seed the Lua -pseudo-random number generator. 
The new script is as follows: - +> EVAL "return ARGV[1]" 0 hello +"hello" ``` -RandomPushScript = <<EOF - local i = tonumber(ARGV[1]) - local res - math.randomseed(tonumber(ARGV[2])) - while (i > 0) do - res = redis.call('lpush',KEYS[1],math.random()) - i = i-1 - end - return res -EOF - -r.del(:mylist) -puts r.eval(RandomPushScript,1,:mylist,10,rand(2**32)) -``` - -What we are doing here is sending the seed of the PRNG as one of the arguments. -This way the script output will be the same given the same arguments, but we are -changing one of the arguments in every invocation, generating the random seed -client-side. The seed will be propagated as one of the arguments both in the -replication link and in the Append Only File, guaranteeing that the same changes -will be generated when the AOF is reloaded or when the replica processes the -script. - -Note: an important part of this behavior is that the PRNG that Redis implements -as `math.random` and `math.randomseed` is guaranteed to have the same output -regardless of the architecture of the system running Redis. 32-bit, 64-bit, -big-endian and little-endian systems will all produce the same output. - -## Replicating commands instead of scripts - -_Note: starting with Redis 5, the replication method described in this section -(scripts effects replication) is the default and does not need to be explicitly -enabled._ - -Starting with Redis 3.2, it is possible to select an alternative replication -method. Instead of replication whole scripts, we can just replicate single write -commands generated by the script. We call this **script effects replication**. - -In this replication mode, while Lua scripts are executed, Redis collects all the -commands executed by the Lua scripting engine that actually modify the dataset. -When the script execution finishes, the sequence of commands that the script -generated are wrapped into a MULTI / EXEC transaction and are sent to replicas -and AOF. 
- -This is useful in several ways depending on the use case: - -- When the script is slow to compute, but the effects can be summarized by a few - write commands, it is a shame to re-compute the script on the replicas or when - reloading the AOF. In this case to replicate just the effect of the script is - much better. -- When script effects replication is enabled, the controls about non - deterministic functions are disabled. You can, for example, use the `TIME` or - `SRANDMEMBER` commands inside your scripts freely at any place. -- The Lua PRNG in this mode is seeded randomly at every call. - -In order to enable script effects replication, you need to issue the following -Lua command before any write operated by the script: - - redis.replicate_commands() - -The function returns true if the script effects replication was enabled, -otherwise if the function was called after the script already called some write -command, it returns false, and normal whole script replication is used. - -## Selective replication of commands - -When script effects replication is selected (see the previous section), it is -possible to have more control in the way commands are replicated to replicas and -AOF. This is a very advanced feature since **a misuse can do damage** by -breaking the contract that the master, replicas, and AOF, all must contain the -same logical content. - -However this is a useful feature since, sometimes, we need to execute certain -commands only in the master in order to create, for example, intermediate -values. - -Think at a Lua script where we perform an intersection between two sets. Pick -five random elements, and create a new set with this five random elements. -Finally we delete the temporary key representing the intersection between the -two original sets. What we want to replicate is only the creation of the new set -with the five elements. It's not useful to also replicate the commands creating -the temporary key. 
- -For this reason, Redis 3.2 introduces a new command that only works when script -effects replication is enabled, and is able to control the scripting replication -engine. The command is called `redis.set_repl()` and fails raising an error if -called when script effects replication is disabled. - -The command can be called with four different arguments: - - redis.set_repl(redis.REPL_ALL) -- Replicate to AOF and replicas. - redis.set_repl(redis.REPL_AOF) -- Replicate only to AOF. - redis.set_repl(redis.REPL_REPLICA) -- Replicate only to replicas (Redis >= 5) - redis.set_repl(redis.REPL_SLAVE) -- Used for backward compatibility, the same as REPL_REPLICA. - redis.set_repl(redis.REPL_NONE) -- Don't replicate at all. - -By default the scripting engine is always set to `REPL_ALL`. By calling this -function the user can switch on/off AOF and or replicas propagation, and turn -them back later at her/his wish. - -A simple example follows: - - redis.replicate_commands() -- Enable effects replication. - redis.call('set','A','1') - redis.set_repl(redis.REPL_NONE) - redis.call('set','B','2') - redis.set_repl(redis.REPL_ALL) - redis.call('set','C','3') - -After running the above script, the result is that only keys A and C will be -created on replicas and AOF. - -## Global variables protection - -Redis scripts are not allowed to create global variables, in order to avoid -leaking data into the Lua state. If a script needs to maintain state between -calls (a pretty uncommon need) it should use Redis keys instead. - -When global variable access is attempted the script is terminated and EVAL -returns with an error: - -``` -redis 127.0.0.1:6379> eval 'a=10' 0 -(error) ERR Error running script (call to f_933044db579a2f8fd45d8065f04a8d0249383e57): user_script:1: Script attempted to create global variable 'a' -``` - -Accessing a _non existing_ global variable generates a similar error. 
- -Using Lua debugging functionality or other approaches like altering the meta -table used to implement global protections in order to circumvent globals -protection is not hard. However it is difficult to do it accidentally. If the -user messes with the Lua global state, the consistency of AOF and replication is -not guaranteed: don't do it. - -Note for Lua newbies: in order to avoid using global variables in your scripts -simply declare every variable you are going to use using the _local_ keyword. - -## Using SELECT inside scripts - -It is possible to call `SELECT` inside Lua scripts like with normal clients. -However, one subtle aspect of the behavior changes between Redis 2.8.11 and Redis -2.8.12. Before the 2.8.12 release the database selected by the Lua script was -_transferred_ to the calling script as current database. Starting from Redis -2.8.12 the database selected by the Lua script only affects the execution of the -script itself, but does not modify the database selected by the client calling -the script. - -The semantic change between patch level releases was needed since the old -behavior was inherently incompatible with the Redis replication layer and was -the cause of bugs. - -## Using Lua scripting in RESP3 mode - -Starting with Redis version 6, the server supports two different protocols. One -is called RESP2, and is the old protocol: all the new connections to the server -start in this mode. However clients are able to negotiate the new protocol using -the `HELLO` command: this way the connection is put in RESP3 mode. In this mode -certain commands, like for instance `HGETALL`, reply with a new data type (the -Map data type in this specific case). The RESP3 protocol is semantically more -powerful, however most scripts are OK with using just RESP2. 
- -The Lua engine always assumes to run in RESP2 mode when talking with Redis, so -whatever the connection that is invoking the `EVAL` or `EVALSHA` command is in -RESP2 or RESP3 mode, Lua scripts will, by default, still see the same kind of -replies they used to see in the past from Redis, when calling commands using the -`redis.call()` built-in function. - -However Lua scripts running in Redis 6 or greater, are able to switch to RESP3 -mode, and get the replies using the new available types. Similarly Lua scripts -are able to reply to clients using the new types. Please make sure to understand -[the capabilities for RESP3](https://github.com/antirez/resp3) before continuing -reading this section. - -In order to switch to RESP3 a script should call this function: - - redis.setresp(3) - -Note that a script can switch back and forth from RESP3 and RESP2 by calling the -function with the argument '3' or '2'. - -At this point the new conversions are available, specifically: - -**Redis to Lua** conversion table specific to RESP3: - -- Redis map reply -> Lua table with a single `map` field containing a Lua table - representing the fields and values of the map. -- Redis set reply -> Lua table with a single `set` field containing a Lua table - representing the elements of the set as fields, having as value just `true`. -- Redis new RESP3 single null value -> Lua nil. -- Redis true reply -> Lua true boolean value. -- Redis false reply -> Lua false boolean value. -- Redis double reply -> Lua table with a single `score` field containing a Lua - number representing the double value. -- All the RESP2 old conversions still apply. - -**Lua to Redis** conversion table specific for RESP3. - -- Lua boolean -> Redis boolean true or false. **Note that this is a change - compared to the RESP2 mode**, where returning true from Lua returned the - number 1 to the Redis client, and returning false used to return NULL. 
-- Lua table with a single `map` field set to a field-value Lua table -> Redis - map reply. -- Lua table with a single `set` field set to a field-value Lua table -> Redis - set reply, the values are discarded and can be anything. -- Lua table with a single `double` field set to a field-value Lua table -> Redis - double reply. -- Lua null -> Redis RESP3 new null reply (protocol `"_\r\n"`). -- All the RESP2 old conversions still apply unless specified above. - -There is one key thing to understand: in case Lua replies with RESP3 types, but -the connection calling Lua is in RESP2 mode, Redis will automatically convert -the RESP3 protocol to RESP2 compatible protocol, as it happens for normal -commands. For instance returning a map type to a connection in RESP2 mode will -have the effect of returning a flat array of fields and values. - -## Available libraries - -The Redis Lua interpreter loads the following Lua libraries: - -- `base` lib. -- `table` lib. -- `string` lib. -- `math` lib. -- `struct` lib. -- `cjson` lib. -- `cmsgpack` lib. -- `bitop` lib. -- `redis.sha1hex` function. -- `redis.breakpoint and redis.debug` function in the context of the - [Redis Lua debugger](/topics/ldb). - -Every Redis instance is _guaranteed_ to have all the above libraries so you can -be sure that the environment for your Redis scripts is always the same. - -struct, CJSON and cmsgpack are external libraries, all the other libraries are -standard Lua libraries. - -### struct - -struct is a library for packing/unpacking structures within Lua. 
- -``` -Valid formats: -> - big endian -< - little endian -![num] - alignment -x - padding -b/B - signed/unsigned byte -h/H - signed/unsigned short -l/L - signed/unsigned long -T - size_t -i/In - signed/unsigned integer with size `n' (default is size of int) -cn - sequence of `n' chars (from/to a string); when packing, n==0 means - the whole string; when unpacking, n==0 means use the previous - read number as the string length -s - zero-terminated string -f - float -d - double -' ' - ignored -``` - -Example: - -``` -127.0.0.1:6379> eval 'return struct.pack("HH", 1, 2)' 0 -"\x01\x00\x02\x00" -127.0.0.1:6379> eval 'return {struct.unpack("HH", ARGV[1])}' 0 "\x01\x00\x02\x00" -1) (integer) 1 -2) (integer) 2 -3) (integer) 5 -127.0.0.1:6379> eval 'return struct.size("HH")' 0 -(integer) 4 -``` - -### CJSON - -The CJSON library provides extremely fast JSON manipulation within Lua. - -Example: - -``` -redis 127.0.0.1:6379> eval 'return cjson.encode({["foo"]= "bar"})' 0 -"{\"foo\":\"bar\"}" -redis 127.0.0.1:6379> eval 'return cjson.decode(ARGV[1])["foo"]' 0 "{\"foo\":\"bar\"}" -"bar" -``` - -### cmsgpack - -The cmsgpack library provides simple and fast MessagePack manipulation within -Lua. - -Example: - -``` -127.0.0.1:6379> eval 'return cmsgpack.pack({"foo", "bar", "baz"})' 0 -"\x93\xa3foo\xa3bar\xa3baz" -127.0.0.1:6379> eval 'return cmsgpack.unpack(ARGV[1])' 0 "\x93\xa3foo\xa3bar\xa3baz" -1) "foo" -2) "bar" -3) "baz" -``` - -### bitop - -The Lua Bit Operations Module adds bitwise operations on numbers. It is -available for scripting in Redis since version 2.8.18. - -Example: - -``` -127.0.0.1:6379> eval 'return bit.tobit(1)' 0 -(integer) 1 -127.0.0.1:6379> eval 'return bit.bor(1,2,4,8,16,32,64,128)' 0 -(integer) 255 -127.0.0.1:6379> eval 'return bit.tohex(422342)' 0 -"000671c6" -``` - -It supports several other functions: `bit.tobit`, `bit.tohex`, `bit.bnot`, -`bit.band`, `bit.bor`, `bit.bxor`, `bit.lshift`, `bit.rshift`, `bit.arshift`, -`bit.rol`, `bit.ror`, `bit.bswap`. 
All available functions are documented in the -[Lua BitOp documentation](http://bitop.luajit.org/api.html) - -### `redis.sha1hex` - -Perform the SHA1 of the input string. - -Example: - -``` -127.0.0.1:6379> eval 'return redis.sha1hex(ARGV[1])' 0 "foo" -"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" -``` - -## Emitting Redis logs from scripts - -It is possible to write to the Redis log file from Lua scripts using the -`redis.log` function. - -``` -redis.log(loglevel,message) -``` - -`loglevel` is one of: - -- `redis.LOG_DEBUG` -- `redis.LOG_VERBOSE` -- `redis.LOG_NOTICE` -- `redis.LOG_WARNING` - -They correspond directly to the normal Redis log levels. Only logs emitted by -scripting using a log level that is equal or greater than the currently -configured Redis instance log level will be emitted. - -The `message` argument is simply a string. Example: - -``` -redis.log(redis.LOG_WARNING,"Something is wrong with this script.") -``` - -Will generate the following: - -``` -[32343] 22 Mar 15:21:39 # Something is wrong with this script. -``` - -## Sandbox and maximum execution time - -Scripts should never try to access the external system, like the file system or -any other system call. A script should only operate on Redis data and passed -arguments. - -Scripts are also subject to a maximum execution time (five seconds by default). -This default timeout is huge since a script should usually run in under a -millisecond. The limit is mostly to handle accidental infinite loops created -during development. - -It is possible to modify the maximum time a script can be executed with -millisecond precision, either via `redis.conf` or using the CONFIG GET / CONFIG -SET command. The configuration parameter affecting max execution time is called -`lua-time-limit`. - -When a script reaches the timeout it is not automatically terminated by Redis -since this violates the contract Redis has with the scripting engine to ensure -that scripts are atomic. 
Interrupting a script means potentially leaving the -dataset with half-written data. For these reasons, when a script executes for more -than the specified time the following happens: - -- Redis logs that a script is running too long. -- It starts accepting commands again from other clients, but will reply with a - BUSY error to all the clients sending normal commands. The only allowed - commands in this status are `SCRIPT KILL` and `SHUTDOWN NOSAVE`. -- It is possible to terminate a script that executes only read-only commands - using the `SCRIPT KILL` command. This does not violate the scripting semantic - as no data was yet written to the dataset by the script. -- If the script already called write commands the only allowed command becomes - `SHUTDOWN NOSAVE` that stops the server without saving the current data set on - disk (basically the server is aborted). - -## EVALSHA in the context of pipelining - -Care should be taken when executing `EVALSHA` in the context of a pipelined -request, since even in a pipeline the order of execution of commands must be -guaranteed. If `EVALSHA` will return a `NOSCRIPT` error the command can not be -reissued later otherwise the order of execution is violated. - -The client library implementation should take one of the following approaches: - -- Always use plain `EVAL` when in the context of a pipeline. - -- Accumulate all the commands to send into the pipeline, then check for `EVAL` -  commands and use the `SCRIPT EXISTS` command to check if all the scripts are -  already defined. If not, add `SCRIPT LOAD` commands on top of the pipeline as -  required, and use `EVALSHA` for all the `EVAL` calls. - -## Debugging Lua scripts - -Starting with Redis 3.2, Redis has support for native Lua debugging. The Redis -Lua debugger is a remote debugger consisting of a server, which is Redis itself, -and a client, which is by default `redis-cli`. 
- -The Lua debugger is described in the [Lua scripts debugging](/topics/ldb) -section of the Redis documentation. diff --git a/iredis/data/commands/eval_ro.md b/iredis/data/commands/eval_ro.md index db8c707..bbbdb8d 100644 --- a/iredis/data/commands/eval_ro.md +++ b/iredis/data/commands/eval_ro.md @@ -1,9 +1,8 @@ -This is a read-only variant of the `EVAL` command that isn't allowed to execute -commands that modify data. +This is a read-only variant of the `EVAL` command that cannot execute commands that modify data. -Unlike `EVAL`, scripts executed with this command can always be killed and never -affect the replication stream. Because it can only read data, this command can -always be executed on a master or a replica. +For more information about when to use this command vs `EVAL`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts). + +For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). @examples @@ -15,5 +14,5 @@ OK "Hello" > EVAL_RO "return redis.call('DEL', KEYS[1])" 1 mykey -(error) ERR Error running script (call to f_359f69785f876b7f3f60597d81534f3d6c403284): @user_script:1: @user_script: 1: Write commands are not allowed from read-only scripts +(error) ERR Error running script (call to b0d697da25b13e49157b2c214a4033546aba2104): @user_script:1: @user_script: 1: Write commands are not allowed from read-only scripts. ``` diff --git a/iredis/data/commands/evalsha.md b/iredis/data/commands/evalsha.md index b3bb515..c8b2329 100644 --- a/iredis/data/commands/evalsha.md +++ b/iredis/data/commands/evalsha.md @@ -1,3 +1,6 @@ -Evaluates a script cached on the server side by its SHA1 digest. Scripts are -cached on the server side using the `SCRIPT LOAD` command. The command is -otherwise identical to `EVAL`. +Evaluate a script from the server's cache by its SHA1 digest. + +The server caches scripts by using the `SCRIPT LOAD` command. +The command is otherwise identical to `EVAL`. 
+ +Please refer to the [Redis Programmability](/topics/programmability) and [Introduction to Eval Scripts](/topics/eval-intro) for more information about Lua scripts. diff --git a/iredis/data/commands/evalsha_ro.md b/iredis/data/commands/evalsha_ro.md index 04368f3..ccb45d6 100644 --- a/iredis/data/commands/evalsha_ro.md +++ b/iredis/data/commands/evalsha_ro.md @@ -1,6 +1,5 @@ -This is a read-only variant of the `EVALSHA` command that isn't allowed to -execute commands that modify data. +This is a read-only variant of the `EVALSHA` command that cannot execute commands that modify data. -Unlike `EVALSHA`, scripts executed with this command can always be killed and -never affect the replication stream. Because it can only read data, this command -can always be executed on a master or a replica. +For more information about when to use this command vs `EVALSHA`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts). + +For more information about `EVALSHA` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/iredis/data/commands/exec.md b/iredis/data/commands/exec.md index b35d04a..b2f58fe 100644 --- a/iredis/data/commands/exec.md +++ b/iredis/data/commands/exec.md @@ -10,7 +10,7 @@ not modified, allowing for a [check-and-set mechanism][ttc]. @return -@array-reply: each element being the reply to each of the commands in the atomic -transaction. +@array-reply: each element being the reply to each of the commands in the +atomic transaction. When using `WATCH`, `EXEC` can return a @nil-reply if the execution was aborted. diff --git a/iredis/data/commands/exists.md b/iredis/data/commands/exists.md index 83c2043..a9a89af 100644 --- a/iredis/data/commands/exists.md +++ b/iredis/data/commands/exists.md @@ -1,26 +1,10 @@ Returns if `key` exists. -Since Redis 3.0.3 it is possible to specify multiple keys instead of a single -one. In such a case, it returns the total number of keys existing. 
Note that -returning 1 or 0 for a single key is just a special case of the variadic usage, -so the command is completely backward compatible. - -The user should be aware that if the same existing key is mentioned in the -arguments multiple times, it will be counted multiple times. So if `somekey` -exists, `EXISTS somekey somekey` will return 2. +The user should be aware that if the same existing key is mentioned in the arguments multiple times, it will be counted multiple times. So if `somekey` exists, `EXISTS somekey somekey` will return 2. @return -@integer-reply, specifically: - -- `1` if the key exists. -- `0` if the key does not exist. - -Since Redis 3.0.3 the command accepts a variable number of keys and the return -value is generalized: - -- The number of keys existing among the ones specified as arguments. Keys - mentioned multiple times and existing are counted multiple times. +@integer-reply, specifically the number of keys that exist from those specified as arguments. @examples diff --git a/iredis/data/commands/expire.md b/iredis/data/commands/expire.md index e5ca954..ddb4c9a 100644 --- a/iredis/data/commands/expire.md +++ b/iredis/data/commands/expire.md @@ -1,14 +1,16 @@ -Set a timeout on `key`. After the timeout has expired, the key will -automatically be deleted. A key with an associated timeout is often said to be -_volatile_ in Redis terminology. +Set a timeout on `key`. +After the timeout has expired, the key will automatically be deleted. +A key with an associated timeout is often said to be _volatile_ in Redis +terminology. The timeout will only be cleared by commands that delete or overwrite the contents of the key, including `DEL`, `SET`, `GETSET` and all the `*STORE` -commands. This means that all the operations that conceptually _alter_ the value -stored at the key without replacing it with a new one will leave the timeout -untouched. 
For instance, incrementing the value of a key with `INCR`, pushing a -new value into a list with `LPUSH`, or altering the field value of a hash with -`HSET` are all operations that will leave the timeout untouched. +commands. +This means that all the operations that conceptually _alter_ the value stored at +the key without replacing it with a new one will leave the timeout untouched. +For instance, incrementing the value of a key with `INCR`, pushing a new value +into a list with `LPUSH`, or altering the field value of a hash with `HSET` are +all operations that will leave the timeout untouched. The timeout can also be cleared, turning the key back into a persistent key, using the `PERSIST` command. @@ -29,19 +31,32 @@ will be `del`, not `expired`). [del]: /commands/del [ntf]: /topics/notifications +## Options + +The `EXPIRE` command supports a set of options: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + ## Refreshing expires It is possible to call `EXPIRE` using as argument a key that already has an -existing expire set. In this case the time to live of a key is _updated_ to the -new value. There are many useful applications for this, an example is documented -in the _Navigation session_ pattern section below. +existing expire set. +In this case the time to live of a key is _updated_ to the new value. +There are many useful applications for this, an example is documented in the +_Navigation session_ pattern section below. ## Differences in Redis prior 2.1.3 In Redis versions prior **2.1.3** altering a key with an expire set using a -command altering its value had the effect of removing the key entirely. 
This -semantics was needed because of limitations in the replication layer that are -now fixed. +command altering its value had the effect of removing the key entirely. +This semantics was needed because of limitations in the replication layer that +are now fixed. `EXPIRE` would return 0 and not alter the timeout for a key with a timeout set. @@ -49,8 +64,8 @@ now fixed. @integer-reply, specifically: -- `1` if the timeout was set. -- `0` if `key` does not exist. +* `1` if the timeout was set. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples @@ -60,16 +75,21 @@ EXPIRE mykey 10 TTL mykey SET mykey "Hello World" TTL mykey +EXPIRE mykey 10 XX +TTL mykey +EXPIRE mykey 10 NX +TTL mykey ``` ## Pattern: Navigation session Imagine you have a web service and you are interested in the latest N pages _recently_ visited by your users, such that each adjacent page view was not -performed more than 60 seconds after the previous. Conceptually you may consider -this set of page views as a _Navigation session_ of your user, that may contain -interesting information about what kind of products he or she is looking for -currently, so that you can recommend related products. +performed more than 60 seconds after the previous. +Conceptually you may consider this set of page views as a _Navigation session_ +of your user, that may contain interesting information about what kind of +products he or she is looking for currently, so that you can recommend related +products. You can easily model this pattern in Redis using the following strategy: every time the user does a page view you call the following commands: @@ -92,14 +112,14 @@ using `RPUSH`. ## Keys with an expire -Normally Redis keys are created without an associated time to live. The key will -simply live forever, unless it is removed by the user in an explicit way, for -instance using the `DEL` command. 
+Normally Redis keys are created without an associated time to live. +The key will simply live forever, unless it is removed by the user in an +explicit way, for instance using the `DEL` command. The `EXPIRE` family of commands is able to associate an expire to a given key, -at the cost of some additional memory used by the key. When a key has an expire -set, Redis will make sure to remove the key when the specified amount of time -elapsed. +at the cost of some additional memory used by the key. +When a key has an expire set, Redis will make sure to remove the key when the +specified amount of time elapsed. The key time to live can be updated or entirely removed using the `EXPIRE` and `PERSIST` command (or other strictly related commands). @@ -114,12 +134,13 @@ Since Redis 2.6 the expire error is from 0 to 1 milliseconds. ## Expires and persistence Keys expiring information is stored as absolute Unix timestamps (in milliseconds -in case of Redis version 2.6 or greater). This means that the time is flowing -even when the Redis instance is not active. +in case of Redis version 2.6 or greater). +This means that the time is flowing even when the Redis instance is not active. -For expires to work well, the computer time must be taken stable. If you move an -RDB file from two computers with a big desync in their clocks, funny things may -happen (like all the keys loaded to be expired at loading time). +For expires to work well, the computer time must be taken stable. +If you move an RDB file from two computers with a big desync in their clocks, +funny things may happen (like all the keys loaded to be expired at loading +time). Even running instances will always check the computer clock, so for instance if you set a key with a time to live of 1000 seconds, and then set your computer @@ -134,9 +155,10 @@ A key is passively expired simply when some client tries to access it, and the key is found to be timed out. 
Of course this is not enough as there are expired keys that will never be -accessed again. These keys should be expired anyway, so periodically Redis tests -a few keys at random among keys with an expire set. All the keys that are -already expired are deleted from the keyspace. +accessed again. +These keys should be expired anyway, so periodically Redis tests a few keys at +random among keys with an expire set. +All the keys that are already expired are deleted from the keyspace. Specifically this is what Redis does 10 times per second: @@ -156,8 +178,9 @@ second divided by 4. In order to obtain a correct behavior without sacrificing consistency, when a key expires, a `DEL` operation is synthesized in both the AOF file and gains all -the attached replicas nodes. This way the expiration process is centralized in -the master instance, and there is no chance of consistency errors. +the attached replicas nodes. +This way the expiration process is centralized in the master instance, and there +is no chance of consistency errors. However while the replicas connected to a master will not expire keys independently (but will wait for the `DEL` coming from the master), they'll diff --git a/iredis/data/commands/expireat.md b/iredis/data/commands/expireat.md index 92c4e9a..cbc10c6 100644 --- a/iredis/data/commands/expireat.md +++ b/iredis/data/commands/expireat.md @@ -11,15 +11,28 @@ Please for the specific semantics of the command refer to the documentation of ## Background `EXPIREAT` was introduced in order to convert relative timeouts to absolute -timeouts for the AOF persistence mode. Of course, it can be used directly to -specify that a given key should expire at a given time in the future. +timeouts for the AOF persistence mode. +Of course, it can be used directly to specify that a given key should expire at +a given time in the future. 
+ +## Options + +The `EXPIREAT` command supports a set of options: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. @return @integer-reply, specifically: -- `1` if the timeout was set. -- `0` if `key` does not exist. +* `1` if the timeout was set. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples diff --git a/iredis/data/commands/expiretime.md b/iredis/data/commands/expiretime.md index 1c4c815..b524dcc 100644 --- a/iredis/data/commands/expiretime.md +++ b/iredis/data/commands/expiretime.md @@ -1,17 +1,13 @@ -Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which -the given key will expire. +Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which the given key will expire. -See also the `PEXPIRETIME` command which returns the same information with -milliseconds resolution. +See also the `PEXPIRETIME` command which returns the same information with milliseconds resolution. @return -@integer-reply: Expiration Unix timestamp in seconds, or a negative value in -order to signal an error (see the description below). +@integer-reply: Expiration Unix timestamp in seconds, or a negative value in order to signal an error (see the description below). -- The command returns `-1` if the key exists but has no associated expiration - time. -- The command returns `-2` if the key does not exist. +* The command returns `-1` if the key exists but has no associated expiration time. +* The command returns `-2` if the key does not exist. 
@examples diff --git a/iredis/data/commands/failover.md b/iredis/data/commands/failover.md index 28be506..719d199 100644 --- a/iredis/data/commands/failover.md +++ b/iredis/data/commands/failover.md @@ -1,84 +1,48 @@ -This command will start a coordinated failover between the -currently-connected-to master and one of its replicas. The failover is not -synchronous, instead a background task will handle coordinating the failover. It -is designed to limit data loss and unavailability of the cluster during the -failover. This command is analogous to the `CLUSTER FAILOVER` command for -non-clustered Redis and is similar to the failover support provided by sentinel. +This command will start a coordinated failover between the currently-connected-to master and one of its replicas. +The failover is not synchronous, instead a background task will handle coordinating the failover. +It is designed to limit data loss and unavailability of the cluster during the failover. +This command is analogous to the `CLUSTER FAILOVER` command for non-clustered Redis and is similar to the failover support provided by sentinel. The specific details of the default failover flow are as follows: -1. The master will internally start a `CLIENT PAUSE WRITE`, which will pause - incoming writes and prevent the accumulation of new data in the replication - stream. -2. The master will monitor its replicas, waiting for a replica to indicate that - it has fully consumed the replication stream. If the master has multiple - replicas, it will only wait for the first replica to catch up. -3. The master will then demote itself to a replica. This is done to prevent any - dual master scenarios. NOTE: The master will not discard its data, so it will - be able to rollback if the replica rejects the failover request in the next - step. -4. The previous master will send a special PSYNC request to the target replica, - `PSYNC FAILOVER`, instructing the target replica to become a master. -5. 
Once the previous master receives acknowledgement the `PSYNC FAILOVER` was - accepted it will unpause its clients. If the PSYNC request is rejected, the - master will abort the failover and return to normal. +1. The master will internally start a `CLIENT PAUSE WRITE`, which will pause incoming writes and prevent the accumulation of new data in the replication stream. +2. The master will monitor its replicas, waiting for a replica to indicate that it has fully consumed the replication stream. If the master has multiple replicas, it will only wait for the first replica to catch up. +3. The master will then demote itself to a replica. This is done to prevent any dual master scenarios. NOTE: The master will not discard its data, so it will be able to rollback if the replica rejects the failover request in the next step. +4. The previous master will send a special PSYNC request to the target replica, `PSYNC FAILOVER`, instructing the target replica to become a master. +5. Once the previous master receives acknowledgement the `PSYNC FAILOVER` was accepted it will unpause its clients. If the PSYNC request is rejected, the master will abort the failover and return to normal. -The field `master_failover_state` in `INFO replication` can be used to track the -current state of the failover, which has the following values: +The field `master_failover_state` in `INFO replication` can be used to track the current state of the failover, which has the following values: -- `no-failover`: There is no ongoing coordinated failover. -- `waiting-for-sync`: The master is waiting for the replica to catch up to its - replication offset. -- `failover-in-progress`: The master has demoted itself, and is attempting to - hand off ownership to a target replica. +* `no-failover`: There is no ongoing coordinated failover. +* `waiting-for-sync`: The master is waiting for the replica to catch up to its replication offset. 
+* `failover-in-progress`: The master has demoted itself, and is attempting to hand off ownership to a target replica. -If the previous master had additional replicas attached to it, they will -continue replicating from it as chained replicas. You will need to manually -execute a `REPLICAOF` on these replicas to start replicating directly from the -new master. +If the previous master had additional replicas attached to it, they will continue replicating from it as chained replicas. You will need to manually execute a `REPLICAOF` on these replicas to start replicating directly from the new master. ## Optional arguments +The following optional arguments exist to modify the behavior of the failover flow: -The following optional arguments exist to modify the behavior of the failover -flow: +* `TIMEOUT` *milliseconds* -- This option allows specifying a maximum time a master will wait in the `waiting-for-sync` state before aborting the failover attempt and rolling back. +This is intended to set an upper bound on the write outage the Redis cluster can experience. +Failovers typically happen in less than a second, but could take longer if there is a large amount of write traffic or the replica is already behind in consuming the replication stream. +If this value is not specified, the timeout can be considered to be "infinite". -- `TIMEOUT` _milliseconds_ -- This option allows specifying a maximum time a - master will wait in the `waiting-for-sync` state before aborting the failover - attempt and rolling back. This is intended to set an upper bound on the write - outage the Redis cluster can experience. Failovers typically happen in less - than a second, but could take longer if there is a large amount of write - traffic or the replica is already behind in consuming the replication stream. - If this value is not specified, the timeout can be considered to be - "infinite". 
+* `TO` *HOST* *PORT* -- This option allows designating a specific replica, by its host and port, to failover to. The master will wait specifically for this replica to catch up to its replication offset, and then failover to it. -- `TO` _HOST_ _PORT_ -- This option allows designating a specific replica, by - its host and port, to failover to. The master will wait specifically for this - replica to catch up to its replication offset, and then failover to it. +* `FORCE` -- If both the `TIMEOUT` and `TO` options are set, the force flag can also be used to designate that once the timeout has elapsed, the master should failover to the target replica instead of rolling back. +This can be used for a best-effort attempt at a failover without data loss, but limiting write outage. -- `FORCE` -- If both the `TIMEOUT` and `TO` options are set, the force flag can - also be used to designate that that once the timeout has elapsed, the master - should failover to the target replica instead of rolling back. This can be - used for a best-effort attempt at a failover without data loss, but limiting - write outage. - -NOTE: The master will always rollback if the `PSYNC FAILOVER` request is -rejected by the target replica. +NOTE: The master will always rollback if the `PSYNC FAILOVER` request is rejected by the target replica. ## Failover abort -The failover command is intended to be safe from data loss and corruption, but -can encounter some scenarios it can not automatically remediate from and may get -stuck. For this purpose, the `FAILOVER ABORT` command exists, which will abort -an ongoing failover and return the master to its normal state. The command has -no side effects if issued in the `waiting-for-sync` state but can introduce -multi-master scenarios in the `failover-in-progress` state. If a multi-master -scenario is encountered, you will need to manually identify which master has the -latest data and designate it as the master and have the other replicas.
+The failover command is intended to be safe from data loss and corruption, but can encounter some scenarios it can not automatically remediate from and may get stuck. +For this purpose, the `FAILOVER ABORT` command exists, which will abort an ongoing failover and return the master to its normal state. +The command has no side effects if issued in the `waiting-for-sync` state but can introduce multi-master scenarios in the `failover-in-progress` state. +If a multi-master scenario is encountered, you will need to manually identify which master has the latest data and designate it as the master and have the other replicas. -NOTE: `REPLICAOF` is disabled while a failover is in progress, this is to -prevent unintended interactions with the failover that might cause data loss. +NOTE: `REPLICAOF` is disabled while a failover is in progress, this is to prevent unintended interactions with the failover that might cause data loss. @return -@simple-string-reply: `OK` if the command was accepted and a coordinated -failover is in progress. An error if the operation cannot be executed. +@simple-string-reply: `OK` if the command was accepted and a coordinated failover is in progress. An error if the operation cannot be executed. diff --git a/iredis/data/commands/fcall.md b/iredis/data/commands/fcall.md new file mode 100644 index 0000000..30e1751 --- /dev/null +++ b/iredis/data/commands/fcall.md @@ -0,0 +1,28 @@ +Invoke a function. + +Functions are loaded to the server with the `FUNCTION LOAD` command. +The first argument is the name of a loaded function. + +The second argument is the number of input key name arguments, followed by all the keys accessed by the function. +In Lua, these names of input keys are available to the function as a table that is the callback's first argument. 
+ +**Important:** +To ensure the correct execution of functions, both in standalone and clustered deployments, all names of keys that a function accesses must be explicitly provided as input key arguments. +The function **should only** access keys whose names are given as input arguments. +Functions **should never** access keys with programmatically-generated names or based on the contents of data structures stored in the database. + +Any additional input argument **should not** represent names of keys. +These are regular arguments and are passed in a Lua table as the callback's second argument. + +For more information please refer to the [Redis Programmability](/topics/programmability) and [Introduction to Redis Functions](/topics/functions-intro) pages. + +@examples + +The following example will create a library named `mylib` with a single function, `myfunc`, that returns the first argument it gets. + +``` +redis> FUNCTION LOAD "#!lua name=mylib \n redis.register_function('myfunc', function(keys, args) return args[1] end)" +"mylib" +redis> FCALL myfunc 0 hello +"hello" +``` diff --git a/iredis/data/commands/fcall_ro.md b/iredis/data/commands/fcall_ro.md new file mode 100644 index 0000000..576b140 --- /dev/null +++ b/iredis/data/commands/fcall_ro.md @@ -0,0 +1,5 @@ +This is a read-only variant of the `FCALL` command that cannot execute commands that modify data. + +For more information about when to use this command vs `FCALL`, please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts). + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). diff --git a/iredis/data/commands/flushall.md b/iredis/data/commands/flushall.md index ff454c1..5a562d0 100644 --- a/iredis/data/commands/flushall.md +++ b/iredis/data/commands/flushall.md @@ -1,26 +1,20 @@ -Delete all the keys of all the existing databases, not just the currently -selected one. This command never fails. 
+Delete all the keys of all the existing databases, not just the currently selected one. +This command never fails. -By default, `FLUSHALL` will synchronously flush all the databases. Starting with -Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to -"yes" changes the default flush mode to asynchronous. +By default, `FLUSHALL` will synchronously flush all the databases. +Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to "yes" changes the default flush mode to asynchronous. -It is possible to use one of the following modifiers to dictate the flushing -mode explicitly: +It is possible to use one of the following modifiers to dictate the flushing mode explicitly: -- `ASYNC`: flushes the databases asynchronously -- `!SYNC`: flushes the databases synchronously +* `ASYNC`: flushes the databases asynchronously +* `!SYNC`: flushes the databases synchronously -Note: an asynchronous `FLUSHALL` command only deletes keys that were present at -the time the command was invoked. Keys created during an asynchronous flush will -be unaffected. +Note: an asynchronous `FLUSHALL` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. @return @simple-string-reply -@history +## Behavior change history -- `>= 4.0.0`: Added the `ASYNC` flushing mode modifier. -- `>= 6.2.0`: Added the `!SYNC` flushing mode modifier and the - **lazyfree-lazy-user-flush** configuration directive. +* `>= 6.2.0`: Default flush behavior now configurable by the **lazyfree-lazy-user-flush** configuration directive.
\ No newline at end of file diff --git a/iredis/data/commands/flushdb.md b/iredis/data/commands/flushdb.md index ee47cc1..f823563 100644 --- a/iredis/data/commands/flushdb.md +++ b/iredis/data/commands/flushdb.md @@ -1,19 +1,20 @@ -Delete all the keys of the currently selected DB. This command never fails. +Delete all the keys of the currently selected DB. +This command never fails. By default, `FLUSHDB` will synchronously flush all keys from the database. -Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration -directive to "yes" changes the default flush mode to asynchronous. +Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to "yes" changes the default flush mode to asynchronous. -It is possible to use one of the following modifiers to dictate the flushing -mode explicitly: +It is possible to use one of the following modifiers to dictate the flushing mode explicitly: -- `ASYNC`: flushes the database asynchronously -- `!SYNC`: flushes the database synchronously +* `ASYNC`: flushes the database asynchronously +* `!SYNC`: flushes the database synchronously -Note: an asynchronous `FLUSHDB` command only deletes keys that were present at -the time the command was invoked. Keys created during an asynchronous flush will -be unaffected. +Note: an asynchronous `FLUSHDB` command only deletes keys that were present at the time the command was invoked. Keys created during an asynchronous flush will be unaffected. @return @simple-string-reply + +## Behavior change history + +* `>= 6.2.0`: Default flush behavior now configurable by the **lazyfree-lazy-user-flush** configuration directive.
\ No newline at end of file diff --git a/iredis/data/commands/function-delete.md b/iredis/data/commands/function-delete.md new file mode 100644 index 0000000..5b90f81 --- /dev/null +++ b/iredis/data/commands/function-delete.md @@ -0,0 +1,23 @@ +Delete a library and all its functions. + +This command deletes the library called _library-name_ and all functions in it. +If the library doesn't exist, the server returns an error. + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@simple-string-reply + +@examples + +``` +redis> FUNCTION LOAD Lua mylib "redis.register_function('myfunc', function(keys, args) return 'hello' end)" +OK +redis> FCALL myfunc 0 +"hello" +redis> FUNCTION DELETE mylib +OK +redis> FCALL myfunc 0 +(error) ERR Function not found +``` diff --git a/iredis/data/commands/function-dump.md b/iredis/data/commands/function-dump.md new file mode 100644 index 0000000..cf144bc --- /dev/null +++ b/iredis/data/commands/function-dump.md @@ -0,0 +1,34 @@ +Return the serialized payload of loaded libraries. +You can restore the serialized payload later with the `FUNCTION RESTORE` command. + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@bulk-string-reply: the serialized payload + +@examples + +The following example shows how to dump loaded libraries using `FUNCTION DUMP` and then it calls `FUNCTION FLUSH` deletes all the libraries. +Then, it restores the original libraries from the serialized payload with `FUNCTION RESTORE`. 
+ +``` +redis> FUNCTION DUMP +"\xf6\x05mylib\x03LUA\x00\xc3@D@J\x1aredis.register_function('my@\x0b\x02', @\x06`\x12\x11keys, args) return`\x0c\a[1] end)\n\x00@\n)\x11\xc8|\x9b\xe4" +redis> FUNCTION FLUSH +OK +redis> FUNCTION RESTORE "\xf6\x05mylib\x03LUA\x00\xc3@D@J\x1aredis.register_function('my@\x0b\x02', @\x06`\x12\x11keys, args) return`\x0c\a[1] end)\n\x00@\n)\x11\xc8|\x9b\xe4" +OK +redis> FUNCTION LIST +1) 1) "library_name" + 2) "mylib" + 3) "engine" + 4) "LUA" + 5) "description" + 6) (nil) + 7) "functions" + 8) 1) 1) "name" + 2) "myfunc" + 3) "description" + 4) (nil) +``` diff --git a/iredis/data/commands/function-flush.md b/iredis/data/commands/function-flush.md new file mode 100644 index 0000000..38c412a --- /dev/null +++ b/iredis/data/commands/function-flush.md @@ -0,0 +1,12 @@ +Deletes all the libraries. + +Unless called with the optional mode argument, the `lazyfree-lazy-user-flush` configuration directive sets the effective behavior. Valid modes are: + +* `ASYNC`: Asynchronously flush the libraries. +* `!SYNC`: Synchronously flush the libraries. + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@simple-string-reply diff --git a/iredis/data/commands/function-help.md b/iredis/data/commands/function-help.md new file mode 100644 index 0000000..38c300d --- /dev/null +++ b/iredis/data/commands/function-help.md @@ -0,0 +1,5 @@ +The `FUNCTION HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/function-kill.md b/iredis/data/commands/function-kill.md new file mode 100644 index 0000000..2db5ea5 --- /dev/null +++ b/iredis/data/commands/function-kill.md @@ -0,0 +1,10 @@ +Kill a function that is currently executing. 
+ + +The `FUNCTION KILL` command can be used only on functions that did not modify the dataset during their execution (since stopping a read-only function does not violate the scripting engine's guaranteed atomicity). + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@simple-string-reply diff --git a/iredis/data/commands/function-list.md b/iredis/data/commands/function-list.md new file mode 100644 index 0000000..bb66dba --- /dev/null +++ b/iredis/data/commands/function-list.md @@ -0,0 +1,21 @@ +Return information about the functions and libraries. + +You can use the optional `LIBRARYNAME` argument to specify a pattern for matching library names. +The optional `WITHCODE` modifier will cause the server to include the libraries source implementation in the reply. + +The following information is provided for each of the libraries in the response: + +* **library_name:** the name of the library. +* **engine:** the engine of the library. +* **functions:** the list of functions in the library. + Each function has the following fields: + * **name:** the name of the function. + * **description:** the function's description. + * **flags:** an array of [function flags](/docs/manual/programmability/functions-intro/#function-flags). +* **library_code:** the library's source code (when given the `WITHCODE` modifier). + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@array-reply diff --git a/iredis/data/commands/function-load.md b/iredis/data/commands/function-load.md new file mode 100644 index 0000000..16f125b --- /dev/null +++ b/iredis/data/commands/function-load.md @@ -0,0 +1,36 @@ +Load a library to Redis. + +The command gets a single mandatory parameter which is the source code that implements the library.
+The library payload must start with a Shebang statement that provides metadata about the library (like the engine to use and the library name). +Shebang format: `#!<engine name> name=<library name>`. Currently engine name must be `lua`. + +For the Lua engine, the implementation should declare one or more entry points to the library with the [`redis.register_function()` API](/topics/lua-api#redis.register_function). +Once loaded, you can call the functions in the library with the `FCALL` (or `FCALL_RO` when applicable) command. + +When attempting to load a library with a name that already exists, the Redis server returns an error. +The `REPLACE` modifier changes this behavior and overwrites the existing library with the new contents. + +The command will return an error in the following circumstances: + +* An invalid _engine-name_ was provided. +* The library's name already exists without the `REPLACE` modifier. +* A function in the library is created with a name that already exists in another library (even when `REPLACE` is specified). +* The engine failed in creating the library's functions (due to a compilation error, for example). +* No functions were declared by the library. + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@string - the library name that was loaded + +@examples + +The following example will create a library named `mylib` with a single function, `myfunc`, that returns the first argument it gets. + +``` +redis> FUNCTION LOAD "#!lua name=mylib \n redis.register_function('myfunc', function(keys, args) return args[1] end)" +mylib +redis> FCALL myfunc 0 hello +"hello" +``` diff --git a/iredis/data/commands/function-restore.md b/iredis/data/commands/function-restore.md new file mode 100644 index 0000000..2868d16 --- /dev/null +++ b/iredis/data/commands/function-restore.md @@ -0,0 +1,15 @@ +Restore libraries from the serialized payload.
+ +You can use the optional _policy_ argument to provide a policy for handling existing libraries. +The following policies are allowed: + +* **APPEND:** appends the restored libraries to the existing libraries and aborts on collision. + This is the default policy. +* **FLUSH:** deletes all existing libraries before restoring the payload. +* **REPLACE:** appends the restored libraries to the existing libraries, replacing any existing ones in case of name collisions. Note that this policy doesn't prevent function name collisions, only libraries. + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@simple-string-reply diff --git a/iredis/data/commands/function-stats.md b/iredis/data/commands/function-stats.md new file mode 100644 index 0000000..005b47b --- /dev/null +++ b/iredis/data/commands/function-stats.md @@ -0,0 +1,21 @@ +Return information about the function that's currently running and information about the available execution engines. + +The reply is a map with two keys: + +1. `running_script`: information about the running script. + If there's no in-flight function, the server replies with a _nil_. + Otherwise, this is a map with the following keys: + * **name:** the name of the function. + * **command:** the command and arguments used for invoking the function. + * **duration_ms:** the function's runtime duration in milliseconds. +2. `engines`: this is a map of maps. Each entry in the map represents a single engine. + Engine map contains statistics about the engine like number of functions and number of libraries. + + +You can use this command to inspect the invocation of a long-running function and decide whether to kill it with the `FUNCTION KILL` command. + +For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). + +@return + +@array-reply
\ No newline at end of file diff --git a/iredis/data/commands/function.md b/iredis/data/commands/function.md new file mode 100644 index 0000000..36ccd9b --- /dev/null +++ b/iredis/data/commands/function.md @@ -0,0 +1,3 @@ +This is a container command for function commands. + +To see the list of available commands you can call `FUNCTION HELP`.
\ No newline at end of file diff --git a/iredis/data/commands/geoadd.md b/iredis/data/commands/geoadd.md index 65365bb..ecdd6e8 100644 --- a/iredis/data/commands/geoadd.md +++ b/iredis/data/commands/geoadd.md @@ -1,72 +1,50 @@ -Adds the specified geospatial items (longitude, latitude, name) to the specified -key. Data is stored into the key as a sorted set, in a way that makes it -possible to query the items with the `GEOSEARCH` command. +Adds the specified geospatial items (longitude, latitude, name) to the specified key. Data is stored into the key as a sorted set, in a way that makes it possible to query the items with the `GEOSEARCH` command. -The command takes arguments in the standard format x,y so the longitude must be -specified before the latitude. There are limits to the coordinates that can be -indexed: areas very near to the poles are not indexable. +The command takes arguments in the standard format x,y so the longitude must be specified before the latitude. There are limits to the coordinates that can be indexed: areas very near to the poles are not indexable. -The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the -following: +The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following: -- Valid longitudes are from -180 to 180 degrees. -- Valid latitudes are from -85.05112878 to 85.05112878 degrees. +* Valid longitudes are from -180 to 180 degrees. +* Valid latitudes are from -85.05112878 to 85.05112878 degrees. -The command will report an error when the user attempts to index coordinates -outside the specified ranges. +The command will report an error when the user attempts to index coordinates outside the specified ranges. -**Note:** there is no **GEODEL** command because you can use `ZREM` to remove -elements. The Geo index structure is just a sorted set. +**Note:** there is no **GEODEL** command because you can use `ZREM` to remove elements. The Geo index structure is just a sorted set. 
## GEOADD options `GEOADD` also provides the following options: -- **XX**: Only update elements that already exist. Never add elements. -- **NX**: Don't update already existing elements. Always add new elements. -- **CH**: Modify the return value from the number of new elements added, to the - total number of elements changed (CH is an abbreviation of _changed_). Changed - elements are **new elements added** and elements already existing for which - **the coordinates was updated**. So elements specified in the command line - having the same score as they had in the past are not counted. Note: normally, - the return value of `GEOADD` only counts the number of new elements added. +* **XX**: Only update elements that already exist. Never add elements. +* **NX**: Don't update already existing elements. Always add new elements. +* **CH**: Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of *changed*). Changed elements are **new elements added** and elements already existing for which **the coordinates was updated**. So elements specified in the command line having the same score as they had in the past are not counted. Note: normally, the return value of `GEOADD` only counts the number of new elements added. Note: The **XX** and **NX** options are mutually exclusive. -## How does it work? +How does it work? +--- The way the sorted set is populated is using a technique called -[Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude bits -are interleaved to form a unique 52-bit integer. We know that a sorted set -double score can represent a 52-bit integer without losing precision. +[Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude +bits are interleaved to form a unique 52-bit integer. We know +that a sorted set double score can represent a 52-bit integer without losing +precision. 
-This format allows for bounding box and radius querying by checking the 1+8 -areas needed to cover the whole shape and discarding elements outside it. The -areas are checked by calculating the range of the box covered, removing enough -bits from the less significant part of the sorted set score, and computing the -score range to query in the sorted set for each area. +This format allows for bounding box and radius querying by checking the 1+8 areas needed to cover the whole shape and discarding elements outside it. The areas are checked by calculating the range of the box covered, removing enough bits from the less significant part of the sorted set score, and computing the score range to query in the sorted set for each area. -## What Earth model does it use? +What Earth model does it use? +--- -The model assumes that the Earth is a sphere since it uses the Haversine formula -to calculate distance. This formula is only an approximation when applied to the -Earth, which is not a perfect sphere. The introduced errors are not an issue -when used, for example, by social networks and similar applications requiring -this type of querying. However, in the worst case, the error may be up to 0.5%, -so you may want to consider other systems for error-critical applications. +The model assumes that the Earth is a sphere since it uses the Haversine formula to calculate distance. This formula is only an approximation when applied to the Earth, which is not a perfect sphere. +The introduced errors are not an issue when used, for example, by social networks and similar applications requiring this type of querying. +However, in the worst case, the error may be up to 0.5%, so you may want to consider other systems for error-critical applications. @return @integer-reply, specifically: -- When used without optional arguments, the number of elements added to the - sorted set (excluding score updates). 
-- If the `CH` option is specified, the number of elements that were changed - (added or updated). - -@history - -- `>= 6.2`: Added the `CH`, `NX` and `XX` options. +* When used without optional arguments, the number of elements added to the sorted set (excluding score updates). +* If the `CH` option is specified, the number of elements that were changed (added or updated). @examples diff --git a/iredis/data/commands/geodist.md b/iredis/data/commands/geodist.md index 3f32a89..af78cdf 100644 --- a/iredis/data/commands/geodist.md +++ b/iredis/data/commands/geodist.md @@ -1,28 +1,24 @@ -Return the distance between two members in the geospatial index represented by -the sorted set. +Return the distance between two members in the geospatial index represented by the sorted set. -Given a sorted set representing a geospatial index, populated using the `GEOADD` -command, the command returns the distance between the two specified members in -the specified unit. +Given a sorted set representing a geospatial index, populated using the `GEOADD` command, the command returns the distance between the two specified members in the specified unit. If one or both the members are missing, the command returns NULL. The unit must be one of the following, and defaults to meters: -- **m** for meters. -- **km** for kilometers. -- **mi** for miles. -- **ft** for feet. +* **m** for meters. +* **km** for kilometers. +* **mi** for miles. +* **ft** for feet. -The distance is computed assuming that the Earth is a perfect sphere, so errors -up to 0.5% are possible in edge cases. +The distance is computed assuming that the Earth is a perfect sphere, so errors up to 0.5% are possible in edge cases. @return @bulk-string-reply, specifically: -The command returns the distance as a double (represented as a string) in the -specified unit, or NULL if one or both the elements are missing. 
+The command returns the distance as a double (represented as a string) +in the specified unit, or NULL if one or both the elements are missing. @examples diff --git a/iredis/data/commands/geohash.md b/iredis/data/commands/geohash.md index f091912..a99ade2 100644 --- a/iredis/data/commands/geohash.md +++ b/iredis/data/commands/geohash.md @@ -1,28 +1,22 @@ -Return valid [Geohash](https://en.wikipedia.org/wiki/Geohash) strings -representing the position of one or more elements in a sorted set value -representing a geospatial index (where elements were added using `GEOADD`). +Return valid [Geohash](https://en.wikipedia.org/wiki/Geohash) strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using `GEOADD`). Normally Redis represents positions of elements using a variation of the Geohash technique where positions are encoded using 52 bit integers. The encoding is also different compared to the standard because the initial min and max coordinates used during the encoding and decoding process are different. This command however **returns a standard Geohash** in the form of a string as -described in the [Wikipedia article](https://en.wikipedia.org/wiki/Geohash) and -compatible with the [geohash.org](http://geohash.org) web site. +described in the [Wikipedia article](https://en.wikipedia.org/wiki/Geohash) and compatible with the [geohash.org](http://geohash.org) web site. -## Geohash string properties +Geohash string properties +--- -The command returns 11 characters Geohash strings, so no precision is loss +The command returns 11 characters Geohash strings, so no precision is lost compared to the Redis internal 52 bit representation. The returned Geohashes have the following properties: -1. They can be shortened removing characters from the right. It will lose - precision but will still point to the same area. -2. 
It is possible to use them in `geohash.org` URLs such as - `http://geohash.org/<geohash-string>`. This is an - [example of such URL](http://geohash.org/sqdtr74hyu0). -3. Strings with a similar prefix are nearby, but the contrary is not true, it is - possible that strings with different prefixes are nearby too. +1. They can be shortened removing characters from the right. It will lose precision but will still point to the same area. +2. It is possible to use them in `geohash.org` URLs such as `http://geohash.org/<geohash-string>`. This is an [example of such URL](http://geohash.org/sqdtr74hyu0). +3. Strings with a similar prefix are nearby, but the contrary is not true, it is possible that strings with different prefixes are nearby too. @return diff --git a/iredis/data/commands/geopos.md b/iredis/data/commands/geopos.md index 14b941c..19dd377 100644 --- a/iredis/data/commands/geopos.md +++ b/iredis/data/commands/geopos.md @@ -1,22 +1,16 @@ -Return the positions (longitude,latitude) of all the specified members of the -geospatial index represented by the sorted set at _key_. +Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at *key*. -Given a sorted set representing a geospatial index, populated using the `GEOADD` -command, it is often useful to obtain back the coordinates of specified members. -When the geospatial index is populated via `GEOADD` the coordinates are -converted into a 52 bit geohash, so the coordinates returned may not be exactly -the ones used in order to add the elements, but small errors may be introduced. +Given a sorted set representing a geospatial index, populated using the `GEOADD` command, it is often useful to obtain back the coordinates of specified members. 
When the geospatial index is populated via `GEOADD` the coordinates are converted into a 52 bit geohash, so the coordinates returned may not be exactly the ones used in order to add the elements, but small errors may be introduced. -The command can accept a variable number of arguments so it always returns an -array of positions even when a single element is specified. +The command can accept a variable number of arguments so it always returns an array of positions even when a single element is specified. @return @array-reply, specifically: The command returns an array where each element is a two elements array -representing longitude and latitude (x,y) of each member name passed as argument -to the command. +representing longitude and latitude (x,y) of each member name passed as +argument to the command. Non existing elements are reported as NULL elements of the array. diff --git a/iredis/data/commands/georadius.md b/iredis/data/commands/georadius.md index fb6db1b..3d0bba4 100644 --- a/iredis/data/commands/georadius.md +++ b/iredis/data/commands/georadius.md @@ -1,106 +1,60 @@ -Return the members of a sorted set populated with geospatial information using -`GEOADD`, which are within the borders of the area specified with the center -location and the maximum distance from the center (the radius). +Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). -As per Redis 6.2.0, GEORADIUS command family are considered deprecated. Please -prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code. +This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` variants (see the section below for more information). -This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` -variants (see the section below for more information). 
- -The common use case for this command is to retrieve geospatial items near a -specified point not farther than a given amount of meters (or other units). This -allows, for example, to suggest mobile users of an application nearby places. +The common use case for this command is to retrieve geospatial items near a specified point not farther than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places. The radius is specified in one of the following units: -- **m** for meters. -- **km** for kilometers. -- **mi** for miles. -- **ft** for feet. - -The command optionally returns additional information using the following -options: - -- `WITHDIST`: Also return the distance of the returned items from the specified - center. The distance is returned in the same unit as the unit specified as the - radius argument of the command. -- `WITHCOORD`: Also return the longitude,latitude coordinates of the matching - items. -- `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item, - in the form of a 52 bit unsigned integer. This is only useful for low level - hacks or debugging and is otherwise of little interest for the general user. - -The command default is to return unsorted items. Two different sorting methods -can be invoked using the following two options: - -- `ASC`: Sort returned items from the nearest to the farthest, relative to the - center. -- `DESC`: Sort returned items from the farthest to the nearest, relative to the - center. - -By default all the matching items are returned. It is possible to limit the -results to the first N matching items by using the **COUNT `<count>`** option. -When `ANY` is provided the command will return as soon as enough matches are -found, so the results may not be the ones closest to the specified point, but on -the other hand, the effort invested by the server is significantly lower. 
When -`ANY` is not provided, the command will perform an effort that is proportional -to the number of items matching the specified area and sort them, so to query -very large areas with a very small `COUNT` option may be slow even if just a few -results are returned. - -By default the command returns the items to the client. It is possible to store -the results with one of these options: - -- `!STORE`: Store the items in a sorted set populated with their geospatial - information. -- `!STOREDIST`: Store the items in a sorted set populated with their distance - from the center as a floating point number, in the same unit specified in the - radius. +* **m** for meters. +* **km** for kilometers. +* **mi** for miles. +* **ft** for feet. + +The command optionally returns additional information using the following options: + +* `WITHDIST`: Also return the distance of the returned items from the specified center. The distance is returned in the same unit as the unit specified as the radius argument of the command. +* `WITHCOORD`: Also return the longitude,latitude coordinates of the matching items. +* `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. + +The command default is to return unsorted items. Two different sorting methods can be invoked using the following two options: + +* `ASC`: Sort returned items from the nearest to the farthest, relative to the center. +* `DESC`: Sort returned items from the farthest to the nearest, relative to the center. + +By default all the matching items are returned. It is possible to limit the results to the first N matching items by using the **COUNT `<count>`** option. 
+When `ANY` is provided the command will return as soon as enough matches are found, +so the results may not be the ones closest to the specified point, but on the other hand, the effort invested by the server is significantly lower. +When `ANY` is not provided, the command will perform an effort that is proportional to the number of items matching the specified area and sort them, +so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. + +By default the command returns the items to the client. It is possible to store the results with one of these options: + +* `!STORE`: Store the items in a sorted set populated with their geospatial information. +* `!STOREDIST`: Store the items in a sorted set populated with their distance from the center as a floating point number, in the same unit specified in the radius. @return @array-reply, specifically: -- Without any `WITH` option specified, the command just returns a linear array - like ["New York","Milan","Paris"]. -- If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command - returns an array of arrays, where each sub-array represents a single item. +* Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. +* If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. -When additional information is returned as an array of arrays for each item, the -first item in the sub-array is always the name of the returned item. The other -information is returned in the following order as successive elements of the -sub-array. +When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other information is returned in the following order as successive elements of the sub-array. -1. 
The distance from the center as a floating point number, in the same unit - specified in the radius. +1. The distance from the center as a floating point number, in the same unit specified in the radius. 2. The geohash integer. 3. The coordinates as a two items x,y array (longitude,latitude). -So for example the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` -will return each item in the following way: +So for example the command `GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST` will return each item in the following way: ["Palermo","190.4424",["13.361389338970184","38.115556395496299"]] -## Read only variants - -Since `GEORADIUS` and `GEORADIUSBYMEMBER` have a `STORE` and `STOREDIST` option -they are technically flagged as writing commands in the Redis command table. For -this reason read-only replicas will flag them, and Redis Cluster replicas will -redirect them to the master instance even if the connection is in read only mode -(See the `READONLY` command of Redis Cluster). - -Breaking the compatibility with the past was considered but rejected, at least -for Redis 4.0, so instead two read only variants of the commands were added. -They are exactly like the original commands but refuse the `STORE` and -`STOREDIST` options. The two variants are called `GEORADIUS_RO` and -`GEORADIUSBYMEMBER_RO`, and can safely be used in replicas. - -Both commands were introduced in Redis 3.2.10 and Redis 4.0.0 respectively. +## Read-only variants -@history +Since `GEORADIUS` and `GEORADIUSBYMEMBER` have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. For this reason read-only replicas will flag them, and Redis Cluster replicas will redirect them to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster). -- `>= 6.2`: Added the `ANY` option for `COUNT`. 
+Breaking the compatibility with the past was considered but rejected, at least for Redis 4.0, so instead two read-only variants of the commands were added. They are exactly like the original commands but refuse the `STORE` and `STOREDIST` options. The two variants are called `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`, and can safely be used in replicas. @examples diff --git a/iredis/data/commands/georadius_ro.md b/iredis/data/commands/georadius_ro.md new file mode 100644 index 0000000..d2e3399 --- /dev/null +++ b/iredis/data/commands/georadius_ro.md @@ -0,0 +1,7 @@ +Read-only variant of the `GEORADIUS` command. + +This command is identical to the `GEORADIUS` command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. + +@return + +@array-reply: An array with each entry being the corresponding result of the subcommand given at the same position. diff --git a/iredis/data/commands/georadiusbymember.md b/iredis/data/commands/georadiusbymember.md index 43d0039..5eab55d 100644 --- a/iredis/data/commands/georadiusbymember.md +++ b/iredis/data/commands/georadiusbymember.md @@ -1,19 +1,11 @@ This command is exactly like `GEORADIUS` with the sole difference that instead -of taking, as the center of the area to query, a longitude and latitude value, -it takes the name of a member already existing inside the geospatial index -represented by the sorted set. - -As per Redis 6.2.0, GEORADIUS command family are considered deprecated. Please -prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code. +of taking, as the center of the area to query, a longitude and latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. The position of the specified member is used as the center of the query. -Please check the example below and the `GEORADIUS` documentation for more -information about the command and its options. 
+Please check the example below and the `GEORADIUS` documentation for more information about the command and its options. -Note that `GEORADIUSBYMEMBER_RO` is also available since Redis 3.2.10 and Redis -4.0.0 in order to provide a read-only command that can be used in replicas. See -the `GEORADIUS` page for more information. +Note that `GEORADIUSBYMEMBER_RO` is also available since Redis 3.2.10 and Redis 4.0.0 in order to provide a read-only command that can be used in replicas. See the `GEORADIUS` page for more information. @examples diff --git a/iredis/data/commands/georadiusbymember_ro.md b/iredis/data/commands/georadiusbymember_ro.md new file mode 100644 index 0000000..94a57a8 --- /dev/null +++ b/iredis/data/commands/georadiusbymember_ro.md @@ -0,0 +1,3 @@ +Read-only variant of the `GEORADIUSBYMEMBER` command. + +This command is identical to the `GEORADIUSBYMEMBER` command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. diff --git a/iredis/data/commands/geosearch.md b/iredis/data/commands/geosearch.md index 014f443..972c1c9 100644 --- a/iredis/data/commands/geosearch.md +++ b/iredis/data/commands/geosearch.md @@ -1,69 +1,43 @@ -Return the members of a sorted set populated with geospatial information using -`GEOADD`, which are within the borders of the area specified by a given shape. -This command extends the `GEORADIUS` command, so in addition to searching within -circular areas, it supports searching within rectangular areas. +Return the members of a sorted set populated with geospatial information using `GEOADD`, which are within the borders of the area specified by a given shape. This command extends the `GEORADIUS` command, so in addition to searching within circular areas, it supports searching within rectangular areas. -This command should be used in place of the deprecated `GEORADIUS` and -`GEORADIUSBYMEMBER` commands. +This command should be used in place of the deprecated `GEORADIUS` and `GEORADIUSBYMEMBER` commands. 
The query's center point is provided by one of these mandatory options: -- `FROMMEMBER`: Use the position of the given existing `<member>` in the sorted - set. -- `FROMLONLAT`: Use the given `<longitude>` and `<latitude>` position. +* `FROMMEMBER`: Use the position of the given existing `<member>` in the sorted set. +* `FROMLONLAT`: Use the given `<longitude>` and `<latitude>` position. The query's shape is provided by one of these mandatory options: -- `BYRADIUS`: Similar to `GEORADIUS`, search inside circular area according to - given `<radius>`. -- `BYBOX`: Search inside an axis-aligned rectangle, determined by `<height>` and - `<width>`. - -The command optionally returns additional information using the following -options: - -- `WITHDIST`: Also return the distance of the returned items from the specified - center point. The distance is returned in the same unit as specified for the - radius or height and width arguments. -- `WITHCOORD`: Also return the longitude and latitude of the matching items. -- `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item, - in the form of a 52 bit unsigned integer. This is only useful for low level - hacks or debugging and is otherwise of little interest for the general user. - -Matching items are returned unsorted by default. To sort them, use one of the -following two options: - -- `ASC`: Sort returned items from the nearest to the farthest, relative to the - center point. -- `DESC`: Sort returned items from the farthest to the nearest, relative to the - center point. - -All matching items are returned by default. To limit the results to the first N -matching items, use the **COUNT `<count>`** option. When the `ANY` option is -used, the command returns as soon as enough matches are found. This means that -the results returned may not be the ones closest to the specified point, but the -effort invested by the server to generate them is significantly less. 
When `ANY` -is not provided, the command will perform an effort that is proportional to the -number of items matching the specified area and sort them, so to query very -large areas with a very small `COUNT` option may be slow even if just a few -results are returned. +* `BYRADIUS`: Similar to `GEORADIUS`, search inside circular area according to given `<radius>`. +* `BYBOX`: Search inside an axis-aligned rectangle, determined by `<height>` and `<width>`. + +The command optionally returns additional information using the following options: + +* `WITHDIST`: Also return the distance of the returned items from the specified center point. The distance is returned in the same unit as specified for the radius or height and width arguments. +* `WITHCOORD`: Also return the longitude and latitude of the matching items. +* `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item, in the form of a 52 bit unsigned integer. This is only useful for low level hacks or debugging and is otherwise of little interest for the general user. + +Matching items are returned unsorted by default. To sort them, use one of the following two options: + +* `ASC`: Sort returned items from the nearest to the farthest, relative to the center point. +* `DESC`: Sort returned items from the farthest to the nearest, relative to the center point. + +All matching items are returned by default. To limit the results to the first N matching items, use the **COUNT `<count>`** option. +When the `ANY` option is used, the command returns as soon as enough matches are found. This means that the results returned may not be the ones closest to the specified point, but the effort invested by the server to generate them is significantly less. 
+When `ANY` is not provided, the command will perform an effort that is proportional to the number of items matching the specified area and sort them, +so to query very large areas with a very small `COUNT` option may be slow even if just a few results are returned. @return @array-reply, specifically: -- Without any `WITH` option specified, the command just returns a linear array - like ["New York","Milan","Paris"]. -- If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command - returns an array of arrays, where each sub-array represents a single item. +* Without any `WITH` option specified, the command just returns a linear array like ["New York","Milan","Paris"]. +* If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command returns an array of arrays, where each sub-array represents a single item. -When additional information is returned as an array of arrays for each item, the -first item in the sub-array is always the name of the returned item. The other -information is returned in the following order as successive elements of the -sub-array. +When additional information is returned as an array of arrays for each item, the first item in the sub-array is always the name of the returned item. The other information is returned in the following order as successive elements of the sub-array. -1. The distance from the center as a floating point number, in the same unit - specified in the shape. +1. The distance from the center as a floating point number, in the same unit specified in the shape. 2. The geohash integer. 3. The coordinates as a two items x,y array (longitude,latitude). @@ -71,7 +45,7 @@ sub-array. 
```cli GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" -GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2" +GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2" GEOSEARCH Sicily FROMLONLAT 15 37 BYRADIUS 200 km ASC -GEOSEARCH Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC +GEOSEARCH Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC WITHCOORD WITHDIST ``` diff --git a/iredis/data/commands/geosearchstore.md b/iredis/data/commands/geosearchstore.md index 6a34bfa..2a4fc38 100644 --- a/iredis/data/commands/geosearchstore.md +++ b/iredis/data/commands/geosearchstore.md @@ -1,11 +1,22 @@ This command is like `GEOSEARCH`, but stores the result in destination key. -This command comes in place of the now deprecated `GEORADIUS` and -`GEORADIUSBYMEMBER`. +This command comes in place of the now deprecated `GEORADIUS` and `GEORADIUSBYMEMBER`. -By default, it stores the results in the `destination` sorted set with their -geospatial information. +By default, it stores the results in the `destination` sorted set with their geospatial information. -When using the `STOREDIST` option, the command stores the items in a sorted set -populated with their distance from the center of the circle or box, as a -floating-point number, in the same unit specified for that shape. +When using the `STOREDIST` option, the command stores the items in a sorted set populated with their distance from the center of the circle or box, as a floating-point number, in the same unit specified for that shape. + +@return + +@integer-reply: the number of elements in the resulting set. 
+ +@examples + +```cli +GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania" +GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2" +GEOSEARCHSTORE key1 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 +GEOSEARCH key1 FROMLONLAT 15 37 BYBOX 400 400 km ASC WITHCOORD WITHDIST WITHHASH +GEOSEARCHSTORE key2 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 STOREDIST +ZRANGE key2 0 -1 WITHSCORES +```
\ No newline at end of file diff --git a/iredis/data/commands/get.md b/iredis/data/commands/get.md index 423aab3..20a3feb 100644 --- a/iredis/data/commands/get.md +++ b/iredis/data/commands/get.md @@ -1,6 +1,7 @@ -Get the value of `key`. If the key does not exist the special value `nil` is -returned. An error is returned if the value stored at `key` is not a string, -because `GET` only handles string values. +Get the value of `key`. +If the key does not exist the special value `nil` is returned. +An error is returned if the value stored at `key` is not a string, because `GET` +only handles string values. @return diff --git a/iredis/data/commands/getbit.md b/iredis/data/commands/getbit.md index 2d05d12..1506af3 100644 --- a/iredis/data/commands/getbit.md +++ b/iredis/data/commands/getbit.md @@ -1,9 +1,10 @@ Returns the bit value at _offset_ in the string value stored at _key_. When _offset_ is beyond the string length, the string is assumed to be a -contiguous space with 0 bits. When _key_ does not exist it is assumed to be an -empty string, so _offset_ is always out of range and the value is also assumed -to be a contiguous space with 0 bits. +contiguous space with 0 bits. +When _key_ does not exist it is assumed to be an empty string, so _offset_ is +always out of range and the value is also assumed to be a contiguous space with +0 bits. @return diff --git a/iredis/data/commands/getdel.md b/iredis/data/commands/getdel.md index fbd9f72..8474e93 100644 --- a/iredis/data/commands/getdel.md +++ b/iredis/data/commands/getdel.md @@ -1,11 +1,9 @@ -Get the value of `key` and delete the key. This command is similar to `GET`, -except for the fact that it also deletes the key on success (if and only if the -key's value type is a string). +Get the value of `key` and delete the key. +This command is similar to `GET`, except for the fact that it also deletes the key on success (if and only if the key's value type is a string). 
@return -@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an -error if the key's value type isn't a string. +@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an error if the key's value type isn't a string. @examples diff --git a/iredis/data/commands/getex.md b/iredis/data/commands/getex.md index 27dfc3d..89ce809 100644 --- a/iredis/data/commands/getex.md +++ b/iredis/data/commands/getex.md @@ -1,17 +1,15 @@ -Get the value of `key` and optionally set its expiration. `GETEX` is similar to -`GET`, but is a write command with additional options. +Get the value of `key` and optionally set its expiration. +`GETEX` is similar to `GET`, but is a write command with additional options. ## Options The `GETEX` command supports a set of options that modify its behavior: -- `EX` _seconds_ -- Set the specified expire time, in seconds. -- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds. -- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key - will expire, in seconds. -- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the - key will expire, in milliseconds. -- `PERSIST` -- Remove the time to live associated with the key. +* `EX` *seconds* -- Set the specified expire time, in seconds. +* `PX` *milliseconds* -- Set the specified expire time, in milliseconds. +* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. +* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. +* `PERSIST` -- Remove the time to live associated with the key. @return diff --git a/iredis/data/commands/getrange.md b/iredis/data/commands/getrange.md index d25ac7a..7283def 100644 --- a/iredis/data/commands/getrange.md +++ b/iredis/data/commands/getrange.md @@ -1,10 +1,8 @@ -**Warning**: this command was renamed to `GETRANGE`, it is called `SUBSTR` in -Redis versions `<= 2.0`. 
- Returns the substring of the string value stored at `key`, determined by the -offsets `start` and `end` (both are inclusive). Negative offsets can be used in -order to provide an offset starting from the end of the string. So -1 means the -last character, -2 the penultimate and so forth. +offsets `start` and `end` (both are inclusive). +Negative offsets can be used in order to provide an offset starting from the end +of the string. +So -1 means the last character, -2 the penultimate and so forth. The function handles out of range requests by limiting the resulting range to the actual length of the string. diff --git a/iredis/data/commands/getset.md b/iredis/data/commands/getset.md index 64b1cba..dd7aee7 100644 --- a/iredis/data/commands/getset.md +++ b/iredis/data/commands/getset.md @@ -1,14 +1,15 @@ Atomically sets `key` to `value` and returns the old value stored at `key`. -Returns an error when `key` exists but does not hold a string value. Any -previous time to live associated with the key is discarded on successful `SET` -operation. +Returns an error when `key` exists but does not hold a string value. Any +previous time to live associated with the key is discarded on successful +`SET` operation. ## Design pattern -`GETSET` can be used together with `INCR` for counting with atomic reset. For -example: a process may call `INCR` against the key `mycounter` every time some -event occurs, but from time to time we need to get the value of the counter and -reset it to zero atomically. This can be done using `GETSET mycounter "0"`: +`GETSET` can be used together with `INCR` for counting with atomic reset. +For example: a process may call `INCR` against the key `mycounter` every time +some event occurs, but from time to time we need to get the value of the counter +and reset it to zero atomically. 
+This can be done using `GETSET mycounter "0"`: ```cli INCR mycounter @@ -16,13 +17,9 @@ GETSET mycounter "0" GET mycounter ``` -As per Redis 6.2, GETSET is considered deprecated. Please prefer `SET` with -`GET` parameter in new code. - @return -@bulk-string-reply: the old value stored at `key`, or `nil` when `key` did not -exist. +@bulk-string-reply: the old value stored at `key`, or `nil` when `key` did not exist. @examples diff --git a/iredis/data/commands/hdel.md b/iredis/data/commands/hdel.md index ce1d184..ab6874e 100644 --- a/iredis/data/commands/hdel.md +++ b/iredis/data/commands/hdel.md @@ -1,20 +1,13 @@ -Removes the specified fields from the hash stored at `key`. Specified fields -that do not exist within this hash are ignored. If `key` does not exist, it is -treated as an empty hash and this command returns `0`. +Removes the specified fields from the hash stored at `key`. +Specified fields that do not exist within this hash are ignored. +If `key` does not exist, it is treated as an empty hash and this command returns +`0`. @return @integer-reply: the number of fields that were removed from the hash, not including specified but non existing fields. -@history - -- `>= 2.4`: Accepts multiple `field` arguments. Redis versions older than 2.4 - can only remove a field per call. - - To remove multiple fields from a hash in an atomic fashion in earlier - versions, use a `MULTI` / `EXEC` block. - @examples ```cli diff --git a/iredis/data/commands/hello.md b/iredis/data/commands/hello.md index c229cc1..3eb6597 100644 --- a/iredis/data/commands/hello.md +++ b/iredis/data/commands/hello.md @@ -1,16 +1,16 @@ Switch to a different protocol, optionally authenticating and setting the connection's name, or provide a contextual client report. -Redis version 6 and above supports two protocols: the old protocol, RESP2, and a -new one introduced with Redis 6, RESP3. 
+In Redis 6 connections start in RESP2 mode, so clients implementing RESP2 do
+not need to be updated or changed. There are no short term plans to drop support for
+* `AUTH <username> <password>`: directly authenticate the connection in addition to switching to the specified protocol version. This makes calling `AUTH` before `HELLO` unnecessary when setting up a new connection. Note that the `username` can be set to "default" to authenticate against a server that does not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to version 6. +* `SETNAME <clientname>`: this is the equivalent of calling `CLIENT SETNAME`. @return -@array-reply: a list of server properties. The reply is a map instead of an -array when RESP3 is selected. The command returns an error if the `protover` -requested does not exist. - -@history - -- `>= 6.2`: `protover` made optional; when called without arguments the command - reports the current connection's context. +@array-reply: a list of server properties. The reply is a map instead of an array when RESP3 is selected. The command returns an error if the `protover` requested does not exist. diff --git a/iredis/data/commands/hexists.md b/iredis/data/commands/hexists.md index 4581b63..f27678a 100644 --- a/iredis/data/commands/hexists.md +++ b/iredis/data/commands/hexists.md @@ -4,8 +4,8 @@ Returns if `field` is an existing field in the hash stored at `key`. @integer-reply, specifically: -- `1` if the hash contains `field`. -- `0` if the hash does not contain `field`, or `key` does not exist. +* `1` if the hash contains `field`. +* `0` if the hash does not contain `field`, or `key` does not exist. @examples diff --git a/iredis/data/commands/hget.md b/iredis/data/commands/hget.md index 12556a1..b8d9101 100644 --- a/iredis/data/commands/hget.md +++ b/iredis/data/commands/hget.md @@ -2,8 +2,8 @@ Returns the value associated with `field` in the hash stored at `key`. @return -@bulk-string-reply: the value associated with `field`, or `nil` when `field` is -not present in the hash or `key` does not exist. 
+@bulk-string-reply: the value associated with `field`, or `nil` when `field` is not +present in the hash or `key` does not exist. @examples diff --git a/iredis/data/commands/hgetall.md b/iredis/data/commands/hgetall.md index 42e95de..3717f00 100644 --- a/iredis/data/commands/hgetall.md +++ b/iredis/data/commands/hgetall.md @@ -1,11 +1,11 @@ -Returns all fields and values of the hash stored at `key`. In the returned -value, every field name is followed by its value, so the length of the reply is -twice the size of the hash. +Returns all fields and values of the hash stored at `key`. +In the returned value, every field name is followed by its value, so the length +of the reply is twice the size of the hash. @return -@array-reply: list of fields and their values stored in the hash, or an empty -list when `key` does not exist. +@array-reply: list of fields and their values stored in the hash, or an +empty list when `key` does not exist. @examples diff --git a/iredis/data/commands/hincrby.md b/iredis/data/commands/hincrby.md index 71f53cb..3d24c25 100644 --- a/iredis/data/commands/hincrby.md +++ b/iredis/data/commands/hincrby.md @@ -1,6 +1,7 @@ Increments the number stored at `field` in the hash stored at `key` by -`increment`. If `key` does not exist, a new key holding a hash is created. If -`field` does not exist the value is set to `0` before the operation is +`increment`. +If `key` does not exist, a new key holding a hash is created. +If `field` does not exist the value is set to `0` before the operation is performed. The range of values supported by `HINCRBY` is limited to 64 bit signed integers. diff --git a/iredis/data/commands/hincrbyfloat.md b/iredis/data/commands/hincrbyfloat.md index fe58beb..d6eb472 100644 --- a/iredis/data/commands/hincrbyfloat.md +++ b/iredis/data/commands/hincrbyfloat.md @@ -1,11 +1,11 @@ Increment the specified `field` of a hash stored at `key`, and representing a -floating point number, by the specified `increment`. 
If the increment value is -negative, the result is to have the hash field value **decremented** instead of -incremented. If the field does not exist, it is set to `0` before performing the -operation. An error is returned if one of the following conditions occur: +floating point number, by the specified `increment`. If the increment value +is negative, the result is to have the hash field value **decremented** instead of incremented. +If the field does not exist, it is set to `0` before performing the operation. +An error is returned if one of the following conditions occur: -- The field contains a value of the wrong type (not a string). -- The current field content or the specified increment are not parsable as a +* The field contains a value of the wrong type (not a string). +* The current field content or the specified increment are not parsable as a double precision floating point number. The exact behavior of this command is identical to the one of the `INCRBYFLOAT` diff --git a/iredis/data/commands/hkeys.md b/iredis/data/commands/hkeys.md index 42fd82c..c74b01e 100644 --- a/iredis/data/commands/hkeys.md +++ b/iredis/data/commands/hkeys.md @@ -2,8 +2,8 @@ Returns all field names in the hash stored at `key`. @return -@array-reply: list of fields in the hash, or an empty list when `key` does not -exist. +@array-reply: list of fields in the hash, or an empty list when `key` does +not exist. @examples diff --git a/iredis/data/commands/hmget.md b/iredis/data/commands/hmget.md index 14b8733..b10c43b 100644 --- a/iredis/data/commands/hmget.md +++ b/iredis/data/commands/hmget.md @@ -2,13 +2,13 @@ Returns the values associated with the specified `fields` in the hash stored at `key`. For every `field` that does not exist in the hash, a `nil` value is returned. -Because non-existing keys are treated as empty hashes, running `HMGET` against a -non-existing `key` will return a list of `nil` values. 
+Because non-existing keys are treated as empty hashes, running `HMGET` against +a non-existing `key` will return a list of `nil` values. @return -@array-reply: list of values associated with the given fields, in the same order -as they are requested. +@array-reply: list of values associated with the given fields, in the same +order as they are requested. ```cli HSET myhash field1 "Hello" diff --git a/iredis/data/commands/hmset.md b/iredis/data/commands/hmset.md index 0b2876a..8cec775 100644 --- a/iredis/data/commands/hmset.md +++ b/iredis/data/commands/hmset.md @@ -1,9 +1,7 @@ Sets the specified fields to their respective values in the hash stored at -`key`. This command overwrites any specified fields already existing in the -hash. If `key` does not exist, a new key holding a hash is created. - -As per Redis 4.0.0, HMSET is considered deprecated. Please prefer `HSET` in new -code. +`key`. +This command overwrites any specified fields already existing in the hash. +If `key` does not exist, a new key holding a hash is created. @return diff --git a/iredis/data/commands/hrandfield.md b/iredis/data/commands/hrandfield.md index c6c82c5..389a109 100644 --- a/iredis/data/commands/hrandfield.md +++ b/iredis/data/commands/hrandfield.md @@ -1,27 +1,19 @@ -When called with just the `key` argument, return a random field from the hash -value stored at `key`. +When called with just the `key` argument, return a random field from the hash value stored at `key`. -If the provided `count` argument is positive, return an array of **distinct -fields**. The array's length is either `count` or the hash's number of fields -(`HLEN`), whichever is lower. +If the provided `count` argument is positive, return an array of **distinct fields**. +The array's length is either `count` or the hash's number of fields (`HLEN`), whichever is lower. -If called with a negative `count`, the behavior changes and the command is -allowed to return the **same field multiple times**. 
+If the `WITHVALUES` modifier is used, the reply is a list of fields and their values from the hash.
+* If `count` is bigger than the number of fields in the hash, the command will only return the whole hash without additional fields. +* The order of fields in the reply is not truly random, so it is up to the client to shuffle them if needed. When the `count` is a negative value, the behavior changes as follows: -- Repeating fields are possible. -- Exactly `count` fields, or an empty array if the hash is empty (non-existing - key), are always returned. -- The order of fields in the reply is truly random. +* Repeating fields are possible. +* Exactly `count` fields, or an empty array if the hash is empty (non-existing key), are always returned. +* The order of fields in the reply is truly random. diff --git a/iredis/data/commands/hset.md b/iredis/data/commands/hset.md index a975947..42e15c1 100644 --- a/iredis/data/commands/hset.md +++ b/iredis/data/commands/hset.md @@ -1,9 +1,6 @@ -Sets `field` in the hash stored at `key` to `value`. If `key` does not exist, a -new key holding a hash is created. If `field` already exists in the hash, it is -overwritten. - -As of Redis 4.0.0, HSET is variadic and allows for multiple `field`/`value` -pairs. +Sets `field` in the hash stored at `key` to `value`. +If `key` does not exist, a new key holding a hash is created. +If `field` already exists in the hash, it is overwritten. @return diff --git a/iredis/data/commands/hsetnx.md b/iredis/data/commands/hsetnx.md index 1926178..c60eaa0 100644 --- a/iredis/data/commands/hsetnx.md +++ b/iredis/data/commands/hsetnx.md @@ -1,13 +1,14 @@ Sets `field` in the hash stored at `key` to `value`, only if `field` does not -yet exist. If `key` does not exist, a new key holding a hash is created. If -`field` already exists, this operation has no effect. +yet exist. +If `key` does not exist, a new key holding a hash is created. +If `field` already exists, this operation has no effect. @return @integer-reply, specifically: -- `1` if `field` is a new field in the hash and `value` was set. 
-- `0` if `field` already exists in the hash and no operation was performed. +* `1` if `field` is a new field in the hash and `value` was set. +* `0` if `field` already exists in the hash and no operation was performed. @examples diff --git a/iredis/data/commands/hstrlen.md b/iredis/data/commands/hstrlen.md index 9a96337..b187f75 100644 --- a/iredis/data/commands/hstrlen.md +++ b/iredis/data/commands/hstrlen.md @@ -1,10 +1,8 @@ -Returns the string length of the value associated with `field` in the hash -stored at `key`. If the `key` or the `field` do not exist, 0 is returned. +Returns the string length of the value associated with `field` in the hash stored at `key`. If the `key` or the `field` do not exist, 0 is returned. @return -@integer-reply: the string length of the value associated with `field`, or zero -when `field` is not present in the hash or `key` does not exist at all. +@integer-reply: the string length of the value associated with `field`, or zero when `field` is not present in the hash or `key` does not exist at all. @examples diff --git a/iredis/data/commands/hvals.md b/iredis/data/commands/hvals.md index 1dbe950..5526959 100644 --- a/iredis/data/commands/hvals.md +++ b/iredis/data/commands/hvals.md @@ -2,8 +2,8 @@ Returns all values in the hash stored at `key`. @return -@array-reply: list of values in the hash, or an empty list when `key` does not -exist. +@array-reply: list of values in the hash, or an empty list when `key` does +not exist. @examples diff --git a/iredis/data/commands/incr.md b/iredis/data/commands/incr.md index 110510e..6abee16 100644 --- a/iredis/data/commands/incr.md +++ b/iredis/data/commands/incr.md @@ -1,14 +1,16 @@ -Increments the number stored at `key` by one. If the key does not exist, it is -set to `0` before performing the operation. An error is returned if the key -contains a value of the wrong type or contains a string that can not be -represented as integer. This operation is limited to 64 bit signed integers. 
+The idea is simply to send an `INCR` command to Redis every time an operation
+occurs.
This simple pattern can be extended in many ways: -- It is possible to use `INCR` and `EXPIRE` together at every page view to have +* It is possible to use `INCR` and `EXPIRE` together at every page view to have a counter counting only the latest N page views separated by less than the specified amount of seconds. -- A client may use GETSET in order to atomically get the current counter value +* A client may use GETSET in order to atomically get the current counter value and reset it to zero. -- Using other atomic increment/decrement commands like `DECR` or `INCRBY` it is - possible to handle values that may get bigger or smaller depending on the - operations performed by the user. Imagine for instance the score of different - users in an online game. +* Using other atomic increment/decrement commands like `DECR` or `INCRBY` it + is possible to handle values that may get bigger or smaller depending on the + operations performed by the user. + Imagine for instance the score of different users in an online game. ## Pattern: Rate limiter The rate limiter pattern is a special counter that is used to limit the rate at -which an operation can be performed. The classical materialization of this -pattern involves limiting the number of requests that can be performed against a -public API. +which an operation can be performed. +The classical materialization of this pattern involves limiting the number of +requests that can be performed against a public API. We provide two implementations of this pattern using `INCR`, where we assume that the problem to solve is limiting the number of API calls to a maximum of @@ -77,9 +81,10 @@ ELSE END ``` -Basically we have a counter for every IP, for every different second. But this -counters are always incremented setting an expire of 10 seconds so that they'll -be removed by Redis automatically when the current second is a different one. +Basically we have a counter for every IP, for every different second. 
+But these counters are always incremented setting an expire of 10 seconds so that
The implementation is more complex and uses -more advanced features but has the advantage of remembering the IP addresses of -the clients currently performing an API call, that may be useful or not -depending on the application. +There is a different way to fix this issue without using scripting, by using +Redis lists instead of counters. +The implementation is more complex and uses more advanced features but has the +advantage of remembering the IP addresses of the clients currently performing an +API call, that may be useful or not depending on the application. ``` FUNCTION LIMIT_API_CALL(ip) @@ -152,5 +158,6 @@ The `RPUSHX` command only pushes the element if the key already exists. Note that we have a race here, but it is not a problem: `EXISTS` may return false but the key may be created by another client before we create it inside -the `MULTI` / `EXEC` block. However this race will just miss an API call under -rare conditions, so the rate limiting will still work correctly. +the `MULTI` / `EXEC` block. +However this race will just miss an API call under rare conditions, so the rate +limiting will still work correctly. diff --git a/iredis/data/commands/incrby.md b/iredis/data/commands/incrby.md index 103dbbf..9734351 100644 --- a/iredis/data/commands/incrby.md +++ b/iredis/data/commands/incrby.md @@ -1,7 +1,8 @@ -Increments the number stored at `key` by `increment`. If the key does not exist, -it is set to `0` before performing the operation. An error is returned if the -key contains a value of the wrong type or contains a string that can not be -represented as integer. This operation is limited to 64 bit signed integers. +Increments the number stored at `key` by `increment`. +If the key does not exist, it is set to `0` before performing the operation. +An error is returned if the key contains a value of the wrong type or contains a +string that can not be represented as integer. +This operation is limited to 64 bit signed integers. 
See `INCR` for extra information on increment/decrement operations. diff --git a/iredis/data/commands/incrbyfloat.md b/iredis/data/commands/incrbyfloat.md index 4998f5f..9efca1d 100644 --- a/iredis/data/commands/incrbyfloat.md +++ b/iredis/data/commands/incrbyfloat.md @@ -1,11 +1,12 @@ Increment the string representing a floating point number stored at `key` by the -specified `increment`. By using a negative `increment` value, the result is that -the value stored at the key is decremented (by the obvious properties of -addition). If the key does not exist, it is set to `0` before performing the -operation. An error is returned if one of the following conditions occur: - -- The key contains a value of the wrong type (not a string). -- The current key content or the specified increment are not parsable as a +specified `increment`. By using a negative `increment` value, the result is +that the value stored at the key is decremented (by the obvious properties +of addition). +If the key does not exist, it is set to `0` before performing the operation. +An error is returned if one of the following conditions occur: + +* The key contains a value of the wrong type (not a string). +* The current key content or the specified increment are not parsable as a double precision floating point number. If the command is successful the new incremented value is stored as the new @@ -16,7 +17,8 @@ Both the value already contained in the string key and the increment argument can be optionally provided in exponential notation, however the value computed after the increment is stored consistently in the same format, that is, an integer number followed (if needed) by a dot, and a variable number of digits -representing the decimal part of the number. Trailing zeroes are always removed. +representing the decimal part of the number. +Trailing zeroes are always removed. 
The precision of the output is fixed at 17 digits after the decimal point regardless of the actual internal precision of the computation. diff --git a/iredis/data/commands/info.md b/iredis/data/commands/info.md index 3a24c42..be9b318 100644 --- a/iredis/data/commands/info.md +++ b/iredis/data/commands/info.md @@ -3,25 +3,26 @@ format that is simple to parse by computers and easy to read by humans. The optional parameter can be used to select a specific section of information: -- `server`: General information about the Redis server -- `clients`: Client connections section -- `memory`: Memory consumption related information -- `persistence`: RDB and AOF related information -- `stats`: General statistics -- `replication`: Master/replica replication information -- `cpu`: CPU consumption statistics -- `commandstats`: Redis command statistics -- `cluster`: Redis Cluster section -- `modules`: Modules section -- `keyspace`: Database related statistics -- `modules`: Module related sections -- `errorstats`: Redis error statistics +* `server`: General information about the Redis server +* `clients`: Client connections section +* `memory`: Memory consumption related information +* `persistence`: RDB and AOF related information +* `stats`: General statistics +* `replication`: Master/replica replication information +* `cpu`: CPU consumption statistics +* `commandstats`: Redis command statistics +* `latencystats`: Redis command latency percentile distribution statistics +* `cluster`: Redis Cluster section +* `modules`: Modules section +* `keyspace`: Database related statistics +* `modules`: Module related sections +* `errorstats`: Redis error statistics It can also take the following values: -- `all`: Return all sections (excluding module generated ones) -- `default`: Return only the default set of sections -- `everything`: Includes `all` and `modules` +* `all`: Return all sections (excluding module generated ones) +* `default`: Return only the default set of sections +* 
`everything`: Includes `all` and `modules` When no parameter is provided, the `default` option is assumed. @@ -38,152 +39,181 @@ INFO ## Notes -Please note depending on the version of Redis some of the fields have been added -or removed. A robust client application should therefore parse the result of -this command by skipping unknown properties, and gracefully handle missing -fields. +Please note depending on the version of Redis some of the fields have been +added or removed. A robust client application should therefore parse the +result of this command by skipping unknown properties, and gracefully handle +missing fields. Here is the description of fields for Redis >= 2.4. + Here is the meaning of all fields in the **server** section: -- `redis_version`: Version of the Redis server -- `redis_git_sha1`: Git SHA1 -- `redis_git_dirty`: Git dirty flag -- `redis_build_id`: The build id -- `redis_mode`: The server's mode ("standalone", "sentinel" or "cluster") -- `os`: Operating system hosting the Redis server -- `arch_bits`: Architecture (32 or 64 bits) -- `multiplexing_api`: Event loop mechanism used by Redis -- `atomicvar_api`: Atomicvar API used by Redis -- `gcc_version`: Version of the GCC compiler used to compile the Redis server -- `process_id`: PID of the server process -- `run_id`: Random value identifying the Redis server (to be used by Sentinel - and Cluster) -- `tcp_port`: TCP/IP listen port -- `server_time_in_usec`: Epoch-based system time with microsecond precision -- `uptime_in_seconds`: Number of seconds since Redis server start -- `uptime_in_days`: Same value expressed in days -- `hz`: The server's current frequency setting -- `configured_hz`: The server's configured frequency setting -- `lru_clock`: Clock incrementing every minute, for LRU management -- `executable`: The path to the server's executable -- `config_file`: The path to the config file +* `redis_version`: Version of the Redis server +* `redis_git_sha1`: Git SHA1 +* `redis_git_dirty`: Git 
dirty flag +* `redis_build_id`: The build id +* `redis_mode`: The server's mode ("standalone", "sentinel" or "cluster") +* `os`: Operating system hosting the Redis server +* `arch_bits`: Architecture (32 or 64 bits) +* `multiplexing_api`: Event loop mechanism used by Redis +* `atomicvar_api`: Atomicvar API used by Redis +* `gcc_version`: Version of the GCC compiler used to compile the Redis server +* `process_id`: PID of the server process +* `process_supervised`: Supervised system ("upstart", "systemd", "unknown" or "no") +* `run_id`: Random value identifying the Redis server (to be used by Sentinel + and Cluster) +* `tcp_port`: TCP/IP listen port +* `server_time_usec`: Epoch-based system time with microsecond precision +* `uptime_in_seconds`: Number of seconds since Redis server start +* `uptime_in_days`: Same value expressed in days +* `hz`: The server's current frequency setting +* `configured_hz`: The server's configured frequency setting +* `lru_clock`: Clock incrementing every minute, for LRU management +* `executable`: The path to the server's executable +* `config_file`: The path to the config file +* `io_threads_active`: Flag indicating if I/O threads are active +* `shutdown_in_milliseconds`: The maximum time remaining for replicas to catch up the replication before completing the shutdown sequence. + This field is only present during shutdown. Here is the meaning of all fields in the **clients** section: -- `connected_clients`: Number of client connections (excluding connections from - replicas) -- `cluster_connections`: An approximation of the number of sockets used by the - cluster's bus -- `maxclients`: The value of the `maxclients` configuration directive. This is - the upper limit for the sum of `connected_clients`, `connected_slaves` and - `cluster_connections`. 
-- `client_longest_output_list`: Longest output list among current client - connections -- `client_biggest_input_buf`: Biggest input buffer among current client - connections -- `blocked_clients`: Number of clients pending on a blocking call (`BLPOP`, - `BRPOP`, `BRPOPLPUSH`, `BLMOVE`, `BZPOPMIN`, `BZPOPMAX`) -- `tracking_clients`: Number of clients being tracked (`CLIENT TRACKING`) -- `clients_in_timeout_table`: Number of clients in the clients timeout table -- `io_threads_active`: Flag indicating if I/O threads are active +* `connected_clients`: Number of client connections (excluding connections + from replicas) +* `cluster_connections`: An approximation of the number of sockets used by the + cluster's bus +* `maxclients`: The value of the `maxclients` configuration directive. This is + the upper limit for the sum of `connected_clients`, `connected_slaves` and + `cluster_connections`. +* `client_recent_max_input_buffer`: Biggest input buffer among current client connections +* `client_recent_max_output_buffer`: Biggest output buffer among current client connections +* `blocked_clients`: Number of clients pending on a blocking call (`BLPOP`, + `BRPOP`, `BRPOPLPUSH`, `BLMOVE`, `BZPOPMIN`, `BZPOPMAX`) +* `tracking_clients`: Number of clients being tracked (`CLIENT TRACKING`) +* `clients_in_timeout_table`: Number of clients in the clients timeout table Here is the meaning of all fields in the **memory** section: -- `used_memory`: Total number of bytes allocated by Redis using its allocator - (either standard **libc**, **jemalloc**, or an alternative allocator such as - [**tcmalloc**][hcgcpgp]) -- `used_memory_human`: Human readable representation of previous value -- `used_memory_rss`: Number of bytes that Redis allocated as seen by the - operating system (a.k.a resident set size). 
This is the number reported by - tools such as `top(1)` and `ps(1)` -- `used_memory_rss_human`: Human readable representation of previous value -- `used_memory_peak`: Peak memory consumed by Redis (in bytes) -- `used_memory_peak_human`: Human readable representation of previous value -- `used_memory_peak_perc`: The percentage of `used_memory_peak` out of - `used_memory` -- `used_memory_overhead`: The sum in bytes of all overheads that the server - allocated for managing its internal data structures -- `used_memory_startup`: Initial amount of memory consumed by Redis at startup - in bytes -- `used_memory_dataset`: The size in bytes of the dataset - (`used_memory_overhead` subtracted from `used_memory`) -- `used_memory_dataset_perc`: The percentage of `used_memory_dataset` out of the - net memory usage (`used_memory` minus `used_memory_startup`) -- `total_system_memory`: The total amount of memory that the Redis host has -- `total_system_memory_human`: Human readable representation of previous value -- `used_memory_lua`: Number of bytes used by the Lua engine -- `used_memory_lua_human`: Human readable representation of previous value -- `used_memory_scripts`: Number of bytes used by cached Lua scripts -- `used_memory_scripts_human`: Human readable representation of previous value -- `maxmemory`: The value of the `maxmemory` configuration directive -- `maxmemory_human`: Human readable representation of previous value -- `maxmemory_policy`: The value of the `maxmemory-policy` configuration - directive -- `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory` -- `mem_allocator`: Memory allocator, chosen at compile time -- `active_defrag_running`: Flag indicating if active defragmentation is active -- `lazyfree_pending_objects`: The number of objects waiting to be freed (as a - result of calling `UNLINK`, or `FLUSHDB` and `FLUSHALL` with the **ASYNC** - option) +* `used_memory`: Total number of bytes allocated by Redis using its + allocator (either 
standard **libc**, **jemalloc**, or an alternative + allocator such as [**tcmalloc**][hcgcpgp]) +* `used_memory_human`: Human readable representation of previous value +* `used_memory_rss`: Number of bytes that Redis allocated as seen by the + operating system (a.k.a resident set size). This is the number reported by + tools such as `top(1)` and `ps(1)` +* `used_memory_rss_human`: Human readable representation of previous value +* `used_memory_peak`: Peak memory consumed by Redis (in bytes) +* `used_memory_peak_human`: Human readable representation of previous value +* `used_memory_peak_perc`: The percentage of `used_memory_peak` out of + `used_memory` +* `used_memory_overhead`: The sum in bytes of all overheads that the server + allocated for managing its internal data structures +* `used_memory_startup`: Initial amount of memory consumed by Redis at startup + in bytes +* `used_memory_dataset`: The size in bytes of the dataset + (`used_memory_overhead` subtracted from `used_memory`) +* `used_memory_dataset_perc`: The percentage of `used_memory_dataset` out of + the net memory usage (`used_memory` minus `used_memory_startup`) +* `total_system_memory`: The total amount of memory that the Redis host has +* `total_system_memory_human`: Human readable representation of previous value +* `used_memory_lua`: Number of bytes used by the Lua engine +* `used_memory_lua_human`: Human readable representation of previous value +* `used_memory_scripts`: Number of bytes used by cached Lua scripts +* `used_memory_scripts_human`: Human readable representation of previous value +* `maxmemory`: The value of the `maxmemory` configuration directive +* `maxmemory_human`: Human readable representation of previous value +* `maxmemory_policy`: The value of the `maxmemory-policy` configuration + directive +* `mem_fragmentation_ratio`: Ratio between `used_memory_rss` and `used_memory`. 
+ Note that this doesn't only includes fragmentation, but also other process overheads (see the `allocator_*` metrics), and also overheads like code, shared libraries, stack, etc. +* `mem_fragmentation_bytes`: Delta between `used_memory_rss` and `used_memory`. + Note that when the total fragmentation bytes is low (few megabytes), a high ratio (e.g. 1.5 and above) is not an indication of an issue. +* `allocator_frag_ratio:`: Ratio between `allocator_active` and `allocator_allocated`. This is the true (external) fragmentation metric (not `mem_fragmentation_ratio`). +* `allocator_frag_bytes` Delta between `allocator_active` and `allocator_allocated`. See note about `mem_fragmentation_bytes`. +* `allocator_rss_ratio`: Ratio between `allocator_resident` and `allocator_active`. This usually indicates pages that the allocator can and probably will soon release back to the OS. +* `allocator_rss_bytes`: Delta between `allocator_resident` and `allocator_active` +* `rss_overhead_ratio`: Ratio between `used_memory_rss` (the process RSS) and `allocator_resident`. This includes RSS overheads that are not allocator or heap related. +* `rss_overhead_bytes`: Delta between `used_memory_rss` (the process RSS) and `allocator_resident` +* `allocator_allocated`: Total bytes allocated form the allocator, including internal-fragmentation. Normally the same as `used_memory`. +* `allocator_active`: Total bytes in the allocator active pages, this includes external-fragmentation. +* `allocator_resident`: Total bytes resident (RSS) in the allocator, this includes pages that can be released to the OS (by `MEMORY PURGE`, or just waiting). +* `mem_not_counted_for_evict`: Used memory that's not counted for key eviction. This is basically transient replica and AOF buffers. +* `mem_clients_slaves`: Memory used by replica clients - Starting Redis 7.0, replica buffers share memory with the replication backlog, so this field can show 0 when replicas don't trigger an increase of memory usage. 
+* `mem_clients_normal`: Memory used by normal clients +* `mem_cluster_links`: Memory used by links to peers on the cluster bus when cluster mode is enabled. +* `mem_aof_buffer`: Transient memory used for AOF and AOF rewrite buffers +* `mem_replication_backlog`: Memory used by replication backlog +* `mem_total_replication_buffers`: Total memory consumed for replication buffers - Added in Redis 7.0. +* `mem_allocator`: Memory allocator, chosen at compile time. +* `active_defrag_running`: When `activedefrag` is enabled, this indicates whether defragmentation is currently active, and the CPU percentage it intends to utilize. +* `lazyfree_pending_objects`: The number of objects waiting to be freed (as a + result of calling `UNLINK`, or `FLUSHDB` and `FLUSHALL` with the **ASYNC** + option) +* `lazyfreed_objects`: The number of objects that have been lazy freed. Ideally, the `used_memory_rss` value should be only slightly higher than -`used_memory`. When rss >> used, a large difference means there is memory -fragmentation (internal or external), which can be evaluated by checking -`mem_fragmentation_ratio`. When used >> rss, it means part of Redis memory has -been swapped off by the operating system: expect some significant latencies. +`used_memory`. +When rss >> used, a large difference may mean there is (external) memory fragmentation, which can be evaluated by checking +`allocator_frag_ratio`, `allocator_frag_bytes`. +When used >> rss, it means part of Redis memory has been swapped off by the +operating system: expect some significant latencies. Because Redis does not have control over how its allocations are mapped to memory pages, high `used_memory_rss` is often the result of a spike in memory usage. When Redis frees memory, the memory is given back to the allocator, and the -allocator may or may not give the memory back to the system. There may be a -discrepancy between the `used_memory` value and memory consumption as reported -by the operating system. 
It may be due to the fact memory has been used and -released by Redis, but not given back to the system. The `used_memory_peak` -value is generally useful to check this point. +allocator may or may not give the memory back to the system. There may be +a discrepancy between the `used_memory` value and memory consumption as +reported by the operating system. It may be due to the fact memory has been +used and released by Redis, but not given back to the system. The +`used_memory_peak` value is generally useful to check this point. Additional introspective information about the server's memory can be obtained by referring to the `MEMORY STATS` command and the `MEMORY DOCTOR`. Here is the meaning of all fields in the **persistence** section: -- `loading`: Flag indicating if the load of a dump file is on-going -- `current_cow_size`: The size in bytes of copy-on-write memory while a child - fork is running -- `current_fork_perc`: The percentage of progress of the current fork process. - For AOF and RDB forks it is the percentage of `current_save_keys_processed` - out of `current_save_keys_total`. 
-- `current_save_keys_processed`: Number of keys processed by the current save - operation -- `current_save_keys_total`: Number of keys at the beginning of the current save - operation -- `rdb_changes_since_last_save`: Number of changes since the last dump -- `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going -- `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save -- `rdb_last_bgsave_status`: Status of the last RDB save operation -- `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in seconds -- `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if - any -- `rdb_last_cow_size`: The size in bytes of copy-on-write memory during the last - RDB save operation -- `aof_enabled`: Flag indicating AOF logging is activated -- `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is on-going -- `aof_rewrite_scheduled`: Flag indicating an AOF rewrite operation will be - scheduled once the on-going RDB save is complete. -- `aof_last_rewrite_time_sec`: Duration of the last AOF rewrite operation in - seconds -- `aof_current_rewrite_time_sec`: Duration of the on-going AOF rewrite operation - if any -- `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation -- `aof_last_write_status`: Status of the last write operation to the AOF -- `aof_last_cow_size`: The size in bytes of copy-on-write memory during the last - AOF rewrite operation -- `module_fork_in_progress`: Flag indicating a module fork is on-going -- `module_fork_last_cow_size`: The size in bytes of copy-on-write memory during - the last module fork operation +* `loading`: Flag indicating if the load of a dump file is on-going +* `async_loading`: Currently loading replication data-set asynchronously while serving old data. This means `repl-diskless-load` is enabled and set to `swapdb`. Added in Redis 7.0. 
+* `current_cow_peak`: The peak size in bytes of copy-on-write memory + while a child fork is running +* `current_cow_size`: The size in bytes of copy-on-write memory + while a child fork is running +* `current_cow_size_age`: The age, in seconds, of the `current_cow_size` value. +* `current_fork_perc`: The percentage of progress of the current fork process. For AOF and RDB forks it is the percentage of `current_save_keys_processed` out of `current_save_keys_total`. +* `current_save_keys_processed`: Number of keys processed by the current save operation +* `current_save_keys_total`: Number of keys at the beginning of the current save operation +* `rdb_changes_since_last_save`: Number of changes since the last dump +* `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going +* `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save +* `rdb_last_bgsave_status`: Status of the last RDB save operation +* `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in + seconds +* `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation + if any +* `rdb_last_cow_size`: The size in bytes of copy-on-write memory during + the last RDB save operation +* `rdb_last_load_keys_expired`: Number volatile keys deleted during the last RDB loading. Added in Redis 7.0. +* `rdb_last_load_keys_loaded`: Number of keys loaded during the last RDB loading. Added in Redis 7.0. +* `aof_enabled`: Flag indicating AOF logging is activated +* `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is + on-going +* `aof_rewrite_scheduled`: Flag indicating an AOF rewrite operation + will be scheduled once the on-going RDB save is complete. 
+* `aof_last_rewrite_time_sec`: Duration of the last AOF rewrite operation in + seconds +* `aof_current_rewrite_time_sec`: Duration of the on-going AOF rewrite + operation if any +* `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation +* `aof_last_write_status`: Status of the last write operation to the AOF +* `aof_last_cow_size`: The size in bytes of copy-on-write memory during + the last AOF rewrite operation +* `module_fork_in_progress`: Flag indicating a module fork is on-going +* `module_fork_last_cow_size`: The size in bytes of copy-on-write memory + during the last module fork operation +* `aof_rewrites`: Number of AOF rewrites performed since startup +* `rdb_saves`: Number of RDB snapshots performed since startup `rdb_changes_since_last_save` refers to the number of operations that produced some kind of changes in the dataset since the last time either `SAVE` or @@ -191,201 +221,206 @@ some kind of changes in the dataset since the last time either `SAVE` or If AOF is activated, these additional fields will be added: -- `aof_current_size`: AOF current file size -- `aof_base_size`: AOF file size on latest startup or rewrite -- `aof_pending_rewrite`: Flag indicating an AOF rewrite operation will be - scheduled once the on-going RDB save is complete. -- `aof_buffer_length`: Size of the AOF buffer -- `aof_rewrite_buffer_length`: Size of the AOF rewrite buffer -- `aof_pending_bio_fsync`: Number of fsync pending jobs in background I/O queue -- `aof_delayed_fsync`: Delayed fsync counter +* `aof_current_size`: AOF current file size +* `aof_base_size`: AOF file size on latest startup or rewrite +* `aof_pending_rewrite`: Flag indicating an AOF rewrite operation + will be scheduled once the on-going RDB save is complete. +* `aof_buffer_length`: Size of the AOF buffer +* `aof_rewrite_buffer_length`: Size of the AOF rewrite buffer. 
Note this field was removed in Redis 7.0 +* `aof_pending_bio_fsync`: Number of fsync pending jobs in background I/O + queue +* `aof_delayed_fsync`: Delayed fsync counter If a load operation is on-going, these additional fields will be added: -- `loading_start_time`: Epoch-based timestamp of the start of the load operation -- `loading_total_bytes`: Total file size -- `loading_rdb_used_mem`: The memory usage of the server that had generated the - RDB file at the time of the file's creation -- `loading_loaded_bytes`: Number of bytes already loaded -- `loading_loaded_perc`: Same value expressed as a percentage -- `loading_eta_seconds`: ETA in seconds for the load to be complete +* `loading_start_time`: Epoch-based timestamp of the start of the load + operation +* `loading_total_bytes`: Total file size +* `loading_rdb_used_mem`: The memory usage of the server that had generated + the RDB file at the time of the file's creation +* `loading_loaded_bytes`: Number of bytes already loaded +* `loading_loaded_perc`: Same value expressed as a percentage +* `loading_eta_seconds`: ETA in seconds for the load to be complete Here is the meaning of all fields in the **stats** section: -- `total_connections_received`: Total number of connections accepted by the - server -- `total_commands_processed`: Total number of commands processed by the server -- `instantaneous_ops_per_sec`: Number of commands processed per second -- `total_net_input_bytes`: The total number of bytes read from the network -- `total_net_output_bytes`: The total number of bytes written to the network -- `instantaneous_input_kbps`: The network's read rate per second in KB/sec -- `instantaneous_output_kbps`: The network's write rate per second in KB/sec -- `rejected_connections`: Number of connections rejected because of `maxclients` - limit -- `sync_full`: The number of full resyncs with replicas -- `sync_partial_ok`: The number of accepted partial resync requests -- `sync_partial_err`: The number of denied partial 
resync requests -- `expired_keys`: Total number of key expiration events -- `expired_stale_perc`: The percentage of keys probably expired -- `expired_time_cap_reached_count`: The count of times that active expiry cycles - have stopped early -- `expire_cycle_cpu_milliseconds`: The cumulative amount of time spend on active - expiry cycles -- `evicted_keys`: Number of evicted keys due to `maxmemory` limit -- `keyspace_hits`: Number of successful lookup of keys in the main dictionary -- `keyspace_misses`: Number of failed lookup of keys in the main dictionary -- `pubsub_channels`: Global number of pub/sub channels with client subscriptions -- `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions -- `latest_fork_usec`: Duration of the latest fork operation in microseconds -- `total_forks`: Total number of fork operations since the server start -- `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes -- `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes - (applicable only to writable replicas) -- `active_defrag_hits`: Number of value reallocations performed by active the - defragmentation process -- `active_defrag_misses`: Number of aborted value reallocations started by the - active defragmentation process -- `active_defrag_key_hits`: Number of keys that were actively defragmented -- `active_defrag_key_misses`: Number of keys that were skipped by the active - defragmentation process -- `tracking_total_keys`: Number of keys being tracked by the server -- `tracking_total_items`: Number of items, that is the sum of clients number for - each key, that are being tracked -- `tracking_total_prefixes`: Number of tracked prefixes in server's prefix table - (only applicable for broadcast mode) -- `unexpected_error_replies`: Number of unexpected error replies, that are types - of errors from an AOF load or replication -- `total_error_replies`: Total number of issued error replies, that is the sum - of rejected 
commands (errors prior command execution) and failed commands - (errors within the command execution) -- `total_reads_processed`: Total number of read events processed -- `total_writes_processed`: Total number of write events processed -- `io_threaded_reads_processed`: Number of read events processed by the main and - I/O threads -- `io_threaded_writes_processed`: Number of write events processed by the main - and I/O threads +* `total_connections_received`: Total number of connections accepted by the + server +* `total_commands_processed`: Total number of commands processed by the server +* `instantaneous_ops_per_sec`: Number of commands processed per second +* `total_net_input_bytes`: The total number of bytes read from the network +* `total_net_output_bytes`: The total number of bytes written to the network +* `total_net_repl_input_bytes`: The total number of bytes read from the network for replication purposes +* `total_net_repl_output_bytes`: The total number of bytes written to the network for replication purposes +* `instantaneous_input_kbps`: The network's read rate per second in KB/sec +* `instantaneous_output_kbps`: The network's write rate per second in KB/sec +* `instantaneous_input_repl_kbps`: The network's read rate per second in KB/sec for replication purposes +* `instantaneous_output_repl_kbps`: The network's write rate per second in KB/sec for replication purposes +* `rejected_connections`: Number of connections rejected because of + `maxclients` limit +* `sync_full`: The number of full resyncs with replicas +* `sync_partial_ok`: The number of accepted partial resync requests +* `sync_partial_err`: The number of denied partial resync requests +* `expired_keys`: Total number of key expiration events +* `expired_stale_perc`: The percentage of keys probably expired +* `expired_time_cap_reached_count`: The count of times that active expiry cycles have stopped early +* `expire_cycle_cpu_milliseconds`: The cumulative amount of time spend on active expiry 
cycles +* `evicted_keys`: Number of evicted keys due to `maxmemory` limit +* `evicted_clients`: Number of evicted clients due to `maxmemory-clients` limit. Added in Redis 7.0. +* `total_eviction_exceeded_time`: Total time `used_memory` was greater than `maxmemory` since server startup, in milliseconds +* `current_eviction_exceeded_time`: The time passed since `used_memory` last rose above `maxmemory`, in milliseconds +* `keyspace_hits`: Number of successful lookup of keys in the main dictionary +* `keyspace_misses`: Number of failed lookup of keys in the main dictionary +* `pubsub_channels`: Global number of pub/sub channels with client + subscriptions +* `pubsub_patterns`: Global number of pub/sub pattern with client + subscriptions +* `pubsubshard_channels`: Global number of pub/sub shard channels with client subscriptions. Added in Redis 7.0.3 +* `latest_fork_usec`: Duration of the latest fork operation in microseconds +* `total_forks`: Total number of fork operations since the server start +* `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes +* `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes + (applicable only to writable replicas) +* `active_defrag_hits`: Number of value reallocations performed by active the + defragmentation process +* `active_defrag_misses`: Number of aborted value reallocations started by the + active defragmentation process +* `active_defrag_key_hits`: Number of keys that were actively defragmented +* `active_defrag_key_misses`: Number of keys that were skipped by the active + defragmentation process +* `total_active_defrag_time`: Total time memory fragmentation was over the limit, in milliseconds +* `current_active_defrag_time`: The time passed since memory fragmentation last was over the limit, in milliseconds +* `tracking_total_keys`: Number of keys being tracked by the server +* `tracking_total_items`: Number of items, that is the sum of clients number for + each key, that are 
being tracked +* `tracking_total_prefixes`: Number of tracked prefixes in server's prefix table + (only applicable for broadcast mode) +* `unexpected_error_replies`: Number of unexpected error replies, that are types + of errors from an AOF load or replication +* `total_error_replies`: Total number of issued error replies, that is the sum of + rejected commands (errors prior command execution) and + failed commands (errors within the command execution) +* `dump_payload_sanitizations`: Total number of dump payload deep integrity validations (see `sanitize-dump-payload` config). +* `total_reads_processed`: Total number of read events processed +* `total_writes_processed`: Total number of write events processed +* `io_threaded_reads_processed`: Number of read events processed by the main and I/O threads +* `io_threaded_writes_processed`: Number of write events processed by the main and I/O threads Here is the meaning of all fields in the **replication** section: -- `role`: Value is "master" if the instance is replica of no one, or "slave" if - the instance is a replica of some master instance. Note that a replica can be - master of another replica (chained replication). -- `master_failover_state`: The state of an ongoing failover, if any. -- `master_replid`: The replication ID of the Redis server. -- `master_replid2`: The secondary replication ID, used for PSYNC after a - failover. 
-- `master_repl_offset`: The server's current replication offset -- `second_repl_offset`: The offset up to which replication IDs are accepted -- `repl_backlog_active`: Flag indicating replication backlog is active -- `repl_backlog_size`: Total size in bytes of the replication backlog buffer -- `repl_backlog_first_byte_offset`: The master offset of the replication backlog - buffer -- `repl_backlog_histlen`: Size in bytes of the data in the replication backlog - buffer +* `role`: Value is "master" if the instance is replica of no one, or "slave" if the instance is a replica of some master instance. + Note that a replica can be master of another replica (chained replication). +* `master_failover_state`: The state of an ongoing failover, if any. +* `master_replid`: The replication ID of the Redis server. +* `master_replid2`: The secondary replication ID, used for PSYNC after a failover. +* `master_repl_offset`: The server's current replication offset +* `second_repl_offset`: The offset up to which replication IDs are accepted +* `repl_backlog_active`: Flag indicating replication backlog is active +* `repl_backlog_size`: Total size in bytes of the replication backlog buffer +* `repl_backlog_first_byte_offset`: The master offset of the replication + backlog buffer +* `repl_backlog_histlen`: Size in bytes of the data in the replication backlog + buffer If the instance is a replica, these additional fields are provided: -- `master_host`: Host or IP address of the master -- `master_port`: Master listening TCP port -- `master_link_status`: Status of the link (up/down) -- `master_last_io_seconds_ago`: Number of seconds since the last interaction - with master -- `master_sync_in_progress`: Indicate the master is syncing to the replica -- `slave_repl_offset`: The replication offset of the replica instance -- `slave_priority`: The priority of the instance as a candidate for failover -- `slave_read_only`: Flag indicating if the replica is read-only +* `master_host`: Host or IP 
address of the master +* `master_port`: Master listening TCP port +* `master_link_status`: Status of the link (up/down) +* `master_last_io_seconds_ago`: Number of seconds since the last interaction + with master +* `master_sync_in_progress`: Indicate the master is syncing to the replica +* `slave_read_repl_offset`: The read replication offset of the replica instance. +* `slave_repl_offset`: The replication offset of the replica instance +* `slave_priority`: The priority of the instance as a candidate for failover +* `slave_read_only`: Flag indicating if the replica is read-only +* `replica_announced`: Flag indicating if the replica is announced by Sentinel. If a SYNC operation is on-going, these additional fields are provided: -- `master_sync_total_bytes`: Total number of bytes that need to be transferred. - this may be 0 when the size is unknown (for example, when the - `repl-diskless-sync` configuration directive is used) -- `master_sync_read_bytes`: Number of bytes already transferred -- `master_sync_left_bytes`: Number of bytes left before syncing is complete (may - be negative when `master_sync_total_bytes` is 0) -- `master_sync_perc`: The percentage `master_sync_read_bytes` from - `master_sync_total_bytes`, or an approximation that uses - `loading_rdb_used_mem` when `master_sync_total_bytes` is 0 -- `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O - during a SYNC operation +* `master_sync_total_bytes`: Total number of bytes that need to be + transferred. 
this may be 0 when the size is unknown (for example, when + the `repl-diskless-sync` configuration directive is used) +* `master_sync_read_bytes`: Number of bytes already transferred +* `master_sync_left_bytes`: Number of bytes left before syncing is complete + (may be negative when `master_sync_total_bytes` is 0) +* `master_sync_perc`: The percentage `master_sync_read_bytes` from + `master_sync_total_bytes`, or an approximation that uses + `loading_rdb_used_mem` when `master_sync_total_bytes` is 0 +* `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O + during a SYNC operation If the link between master and replica is down, an additional field is provided: -- `master_link_down_since_seconds`: Number of seconds since the link is down +* `master_link_down_since_seconds`: Number of seconds since the link is down The following field is always provided: -- `connected_slaves`: Number of connected replicas +* `connected_slaves`: Number of connected replicas -If the server is configured with the `min-slaves-to-write` (or starting with -Redis 5 with the `min-replicas-to-write`) directive, an additional field is -provided: +If the server is configured with the `min-slaves-to-write` (or starting with Redis 5 with the `min-replicas-to-write`) directive, an additional field is provided: -- `min_slaves_good_slaves`: Number of replicas currently considered good +* `min_slaves_good_slaves`: Number of replicas currently considered good For each replica, the following line is added: -- `slaveXXX`: id, IP address, port, state, offset, lag +* `slaveXXX`: id, IP address, port, state, offset, lag Here is the meaning of all fields in the **cpu** section: -- `used_cpu_sys`: System CPU consumed by the Redis server, which is the sum of - system CPU consumed by all threads of the server process (main thread and - background threads) -- `used_cpu_user`: User CPU consumed by the Redis server, which is the sum of - user CPU consumed by all threads of the server process 
(main thread and - background threads) -- `used_cpu_sys_children`: System CPU consumed by the background processes -- `used_cpu_user_children`: User CPU consumed by the background processes -- `used_cpu_sys_main_thread`: System CPU consumed by the Redis server main - thread -- `used_cpu_user_main_thread`: User CPU consumed by the Redis server main thread +* `used_cpu_sys`: System CPU consumed by the Redis server, which is the sum of system CPU consumed by all threads of the server process (main thread and background threads) +* `used_cpu_user`: User CPU consumed by the Redis server, which is the sum of user CPU consumed by all threads of the server process (main thread and background threads) +* `used_cpu_sys_children`: System CPU consumed by the background processes +* `used_cpu_user_children`: User CPU consumed by the background processes +* `used_cpu_sys_main_thread`: System CPU consumed by the Redis server main thread +* `used_cpu_user_main_thread`: User CPU consumed by the Redis server main thread The **commandstats** section provides statistics based on the command type, -including the number of calls that reached command execution (not rejected), the -total CPU time consumed by these commands, the average CPU consumed per command -execution, the number of rejected calls (errors prior command execution), and -the number of failed calls (errors within the command execution). + including the number of calls that reached command execution (not rejected), + the total CPU time consumed by these commands, the average CPU consumed + per command execution, the number of rejected calls + (errors prior command execution), and the number of failed calls + (errors within the command execution). + +For each command type, the following line is added: + +* `cmdstat_XXX`: `calls=XXX,usec=XXX,usec_per_call=XXX,rejected_calls=XXX,failed_calls=XXX` + +The **latencystats** section provides latency percentile distribution statistics based on the command type. 
+ + By default, the exported latency percentiles are the p50, p99, and p999. + If you need to change the exported percentiles, use `CONFIG SET latency-tracking-info-percentiles "50.0 99.0 99.9"`. + + This section requires the extended latency monitoring feature to be enabled (by default it's enabled). + If you need to enable it, use `CONFIG SET latency-tracking yes`. For each command type, the following line is added: -- `cmdstat_XXX`: - `calls=XXX,usec=XXX,usec_per_call=XXX,rejected_calls=XXX,failed_calls=XXX` +* `latency_percentiles_usec_XXX: p<percentile 1>=<percentile 1 value>,p<percentile 2>=<percentile 2 value>,...` -The **errorstats** section enables keeping track of the different errors that -occurred within Redis, based upon the reply error prefix ( The first word after -the "-", up to the first space. Example: `ERR` ). +The **errorstats** section enables keeping track of the different errors that occurred within Redis, + based upon the reply error prefix ( The first word after the "-", up to the first space. Example: `ERR` ). For each error type, the following line is added: -- `errorstat_XXX`: `count=XXX` +* `errorstat_XXX`: `count=XXX` The **cluster** section currently only contains a unique field: -- `cluster_enabled`: Indicate Redis cluster is enabled +* `cluster_enabled`: Indicate Redis cluster is enabled -The **modules** section contains additional information about loaded modules if -the modules provide it. The field part of properties lines in this section is -always prefixed with the module's name. +The **modules** section contains additional information about loaded modules if the modules provide it. The field part of properties lines in this section is always prefixed with the module's name. The **keyspace** section provides statistics on the main dictionary of each -database. The statistics are the number of keys, and the number of keys with an -expiration. +database. 
+The statistics are the number of keys, and the number of keys with an expiration. For each database, the following line is added: -- `dbXXX`: `keys=XXX,expires=XXX` +* `dbXXX`: `keys=XXX,expires=XXX` [hcgcpgp]: http://code.google.com/p/google-perftools/ -**A note about the word slave used in this man page**: Starting with Redis 5, if -not for backward compatibility, the Redis project no longer uses the word slave. -Unfortunately in this command the word slave is part of the protocol, so we'll -be able to remove such occurrences only when this API will be naturally -deprecated. - -**Modules generated sections**: Starting with Redis 6, modules can inject their -info into the `INFO` command, these are excluded by default even when the `all` -argument is provided (it will include a list of loaded modules but not their -generated info fields). To get these you must use either the `modules` argument -or `everything`., +**A note about the word slave used in this man page**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. + +**Modules generated sections**: Starting with Redis 6, modules can inject their info into the `INFO` command, these are excluded by default even when the `all` argument is provided (it will include a list of loaded modules but not their generated info fields). To get these you must use either the `modules` argument or `everything`., diff --git a/iredis/data/commands/keys.md b/iredis/data/commands/keys.md index 8991beb..186caca 100644 --- a/iredis/data/commands/keys.md +++ b/iredis/data/commands/keys.md @@ -1,25 +1,28 @@ Returns all keys matching `pattern`. While the time complexity for this operation is O(N), the constant times are -fairly low. 
For example, Redis running on an entry level laptop can scan a 1 -million key database in 40 milliseconds. +fairly low. +For example, Redis running on an entry level laptop can scan a 1 million key +database in 40 milliseconds. **Warning**: consider `KEYS` as a command that should only be used in production -environments with extreme care. It may ruin performance when it is executed -against large databases. This command is intended for debugging and special -operations, such as changing your keyspace layout. Don't use `KEYS` in your -regular application code. If you're looking for a way to find keys in a subset -of your keyspace, consider using `SCAN` or [sets][tdts]. +environments with extreme care. +It may ruin performance when it is executed against large databases. +This command is intended for debugging and special operations, such as changing +your keyspace layout. +Don't use `KEYS` in your regular application code. +If you're looking for a way to find keys in a subset of your keyspace, consider +using `SCAN` or [sets][tdts]. [tdts]: /topics/data-types#sets Supported glob-style patterns: -- `h?llo` matches `hello`, `hallo` and `hxllo` -- `h*llo` matches `hllo` and `heeeello` -- `h[ae]llo` matches `hello` and `hallo,` but not `hillo` -- `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` -- `h[a-b]llo` matches `hallo` and `hbllo` +* `h?llo` matches `hello`, `hallo` and `hxllo` +* `h*llo` matches `hllo` and `heeeello` +* `h[ae]llo` matches `hello` and `hallo,` but not `hillo` +* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` +* `h[a-b]llo` matches `hallo` and `hbllo` Use `\` to escape special characters if you want to match them verbatim. diff --git a/iredis/data/commands/lastsave.md b/iredis/data/commands/lastsave.md index 8a4dea1..cfec625 100644 --- a/iredis/data/commands/lastsave.md +++ b/iredis/data/commands/lastsave.md @@ -1,7 +1,7 @@ -Return the UNIX TIME of the last DB save executed with success. 
A client may -check if a `BGSAVE` command succeeded reading the `LASTSAVE` value, then issuing -a `BGSAVE` command and checking at regular intervals every N seconds if -`LASTSAVE` changed. +Return the UNIX TIME of the last DB save executed with success. +A client may check if a `BGSAVE` command succeeded reading the `LASTSAVE` value, +then issuing a `BGSAVE` command and checking at regular intervals every N +seconds if `LASTSAVE` changed. @return diff --git a/iredis/data/commands/latency-doctor.md b/iredis/data/commands/latency-doctor.md index e089abb..8f493aa 100644 --- a/iredis/data/commands/latency-doctor.md +++ b/iredis/data/commands/latency-doctor.md @@ -1,5 +1,4 @@ -The `LATENCY DOCTOR` command reports about different latency-related issues and -advises about possible remedies. +The `LATENCY DOCTOR` command reports about different latency-related issues and advises about possible remedies. This command is the most powerful analysis tool in the latency monitoring framework, and is able to provide additional statistical data like the average @@ -7,10 +6,10 @@ period between latency spikes, the median deviation, and a human-readable analysis of the event. For certain events, like `fork`, additional information is provided, like the rate at which the system forks processes. -This is the output you should post in the Redis mailing list if you are looking -for help about Latency related issues. +This is the output you should post in the Redis mailing list if you are +looking for help about Latency related issues. -@example +@examples ``` 127.0.0.1:6379> latency doctor @@ -35,8 +34,7 @@ I have a few advices for you: fragment those objects into multiple smaller objects. ``` -**Note:** the doctor has erratic psychological behaviors, so we recommend -interacting with it carefully. +**Note:** the doctor has erratic psychological behaviors, so we recommend interacting with it carefully. For more information refer to the [Latency Monitoring Framework page][lm]. 
diff --git a/iredis/data/commands/latency-graph.md b/iredis/data/commands/latency-graph.md index ee3d93f..f1cfa5e 100644 --- a/iredis/data/commands/latency-graph.md +++ b/iredis/data/commands/latency-graph.md @@ -1,30 +1,26 @@ Produces an ASCII-art style graph for the specified event. -`LATENCY GRAPH` lets you intuitively understand the latency trend of an `event` -via state-of-the-art visualization. It can be used for quickly grasping the -situation before resorting to means such parsing the raw data from -`LATENCY HISTORY` or external tooling. +`LATENCY GRAPH` lets you intuitively understand the latency trend of an `event` via state-of-the-art visualization. It can be used for quickly grasping the situation before resorting to means such parsing the raw data from `LATENCY HISTORY` or external tooling. Valid values for `event` are: +* `active-defrag-cycle` +* `aof-fsync-always` +* `aof-stat` +* `aof-rewrite-diff-write` +* `aof-rename` +* `aof-write` +* `aof-write-active-child` +* `aof-write-alone` +* `aof-write-pending-fsync` +* `command` +* `expire-cycle` +* `eviction-cycle` +* `eviction-del` +* `fast-command` +* `fork` +* `rdb-unlink-temp-file` -- `active-defrag-cycle` -- `aof-fsync-always` -- `aof-stat` -- `aof-rewrite-diff-write` -- `aof-rename` -- `aof-write` -- `aof-write-active-child` -- `aof-write-alone` -- `aof-write-pending-fsync` -- `command` -- `expire-cycle` -- `eviction-cycle` -- `eviction-del` -- `fast-command` -- `fork` -- `rdb-unlink-temp-file` - -@example +@examples ``` 127.0.0.1:6379> latency reset command @@ -56,8 +52,8 @@ The vertical labels under each graph column represent the amount of seconds, minutes, hours or days ago the event happened. For example "15s" means that the first graphed event happened 15 seconds ago. -The graph is normalized in the min-max scale so that the zero (the underscore in -the lower row) is the minimum, and a # in the higher row is the maximum. 
+The graph is normalized in the min-max scale so that the zero (the underscore +in the lower row) is the minimum, and a # in the higher row is the maximum. For more information refer to the [Latency Monitoring Framework page][lm]. @@ -65,4 +61,4 @@ For more information refer to the [Latency Monitoring Framework page][lm]. @return -@bulk-string-reply +@bulk-string-reply
\ No newline at end of file diff --git a/iredis/data/commands/latency-histogram.md b/iredis/data/commands/latency-histogram.md new file mode 100644 index 0000000..a97928b --- /dev/null +++ b/iredis/data/commands/latency-histogram.md @@ -0,0 +1,38 @@ +The `LATENCY HISTOGRAM` command reports a cumulative distribution of latencies in the format of a histogram for each of the specified command names. +If no command names are specified then all commands that contain latency information will be replied. + +Each reported histogram has the following fields: + +* Command name. +* The total calls for that command. +* A map of time buckets: + * Each bucket represents a latency range. + * Each bucket covers twice the previous bucket's range. + * Empty buckets are not printed. + * The tracked latencies are between 1 microsecond and roughly 1 second. + * Everything above 1 sec is considered +Inf. + * At max there will be log2(1000000000)=30 buckets. + +This command requires the extended latency monitoring feature to be enabled (by default it's enabled). +If you need to enable it, use `CONFIG SET latency-tracking yes`. + +@examples + +``` +127.0.0.1:6379> LATENCY HISTOGRAM set +1# "set" => + 1# "calls" => (integer) 100000 + 2# "histogram_usec" => + 1# (integer) 1 => (integer) 99583 + 2# (integer) 2 => (integer) 99852 + 3# (integer) 4 => (integer) 99914 + 4# (integer) 8 => (integer) 99940 + 5# (integer) 16 => (integer) 99968 + 6# (integer) 33 => (integer) 100000 +``` + +@return + +@array-reply: specifically: + +The command returns a map where each key is a command name, and each value is a map with the total calls, and an inner map of the histogram time buckets. 
diff --git a/iredis/data/commands/latency-history.md b/iredis/data/commands/latency-history.md index de48ecd..815e644 100644 --- a/iredis/data/commands/latency-history.md +++ b/iredis/data/commands/latency-history.md @@ -1,31 +1,28 @@ -The `LATENCY HISTORY` command returns the raw data of the `event`'s latency -spikes time series. +The `LATENCY HISTORY` command returns the raw data of the `event`'s latency spikes time series. -This is useful to an application that wants to fetch raw data in order to -perform monitoring, display graphs, and so forth. +This is useful to an application that wants to fetch raw data in order to perform monitoring, display graphs, and so forth. The command will return up to 160 timestamp-latency pairs for the `event`. Valid values for `event` are: - -- `active-defrag-cycle` -- `aof-fsync-always` -- `aof-stat` -- `aof-rewrite-diff-write` -- `aof-rename` -- `aof-write` -- `aof-write-active-child` -- `aof-write-alone` -- `aof-write-pending-fsync` -- `command` -- `expire-cycle` -- `eviction-cycle` -- `eviction-del` -- `fast-command` -- `fork` -- `rdb-unlink-temp-file` - -@example +* `active-defrag-cycle` +* `aof-fsync-always` +* `aof-stat` +* `aof-rewrite-diff-write` +* `aof-rename` +* `aof-write` +* `aof-write-active-child` +* `aof-write-alone` +* `aof-write-pending-fsync` +* `command` +* `expire-cycle` +* `eviction-cycle` +* `eviction-del` +* `fast-command` +* `fork` +* `rdb-unlink-temp-file` + +@examples ``` 127.0.0.1:6379> latency history command @@ -44,4 +41,4 @@ For more information refer to the [Latency Monitoring Framework page][lm]. @array-reply: specifically: The command returns an array where each element is a two elements array -representing the timestamp and the latency of the event. +representing the timestamp and the latency of the event.
\ No newline at end of file diff --git a/iredis/data/commands/latency-latest.md b/iredis/data/commands/latency-latest.md index 883f0a5..918435b 100644 --- a/iredis/data/commands/latency-latest.md +++ b/iredis/data/commands/latency-latest.md @@ -2,15 +2,15 @@ The `LATENCY LATEST` command reports the latest latency events logged. Each reported event has the following fields: -- Event name. -- Unix timestamp of the latest latency spike for the event. -- Latest event latency in millisecond. -- All-time maximum latency for this event. +* Event name. +* Unix timestamp of the latest latency spike for the event. +* Latest event latency in millisecond. +* All-time maximum latency for this event. -"All-time" means the maximum latency since the Redis instance was started, or -the time that events were reset `LATENCY RESET`. +"All-time" means the maximum latency since the Redis instance was +started, or the time that events were reset `LATENCY RESET`. -@example: +@examples ``` 127.0.0.1:6379> debug sleep 1 @@ -34,5 +34,4 @@ For more information refer to the [Latency Monitoring Framework page][lm]. @array-reply: specifically: The command returns an array where each element is a four elements array -representing the event's name, timestamp, latest and all-time latency -measurements. +representing the event's name, timestamp, latest and all-time latency measurements. diff --git a/iredis/data/commands/latency-reset.md b/iredis/data/commands/latency-reset.md index 8d089e6..9762869 100644 --- a/iredis/data/commands/latency-reset.md +++ b/iredis/data/commands/latency-reset.md @@ -1,31 +1,29 @@ -The `LATENCY RESET` command resets the latency spikes time series of all, or -only some, events. +The `LATENCY RESET` command resets the latency spikes time series of all, or only some, events. -When the command is called without arguments, it resets all the events, -discarding the currently logged latency spike events, and resetting the maximum -event time register. 
+When the command is called without arguments, it resets all the +events, discarding the currently logged latency spike events, and resetting +the maximum event time register. -It is possible to reset only specific events by providing the `event` names as -arguments. +It is possible to reset only specific events by providing the `event` names +as arguments. Valid values for `event` are: - -- `active-defrag-cycle` -- `aof-fsync-always` -- `aof-stat` -- `aof-rewrite-diff-write` -- `aof-rename` -- `aof-write` -- `aof-write-active-child` -- `aof-write-alone` -- `aof-write-pending-fsync` -- `command` -- `expire-cycle` -- `eviction-cycle` -- `eviction-del` -- `fast-command` -- `fork` -- `rdb-unlink-temp-file` +* `active-defrag-cycle` +* `aof-fsync-always` +* `aof-stat` +* `aof-rewrite-diff-write` +* `aof-rename` +* `aof-write` +* `aof-write-active-child` +* `aof-write-alone` +* `aof-write-pending-fsync` +* `command` +* `expire-cycle` +* `eviction-cycle` +* `eviction-del` +* `fast-command` +* `fork` +* `rdb-unlink-temp-file` For more information refer to the [Latency Monitoring Framework page][lm]. diff --git a/iredis/data/commands/latency.md b/iredis/data/commands/latency.md new file mode 100644 index 0000000..fd5c95d --- /dev/null +++ b/iredis/data/commands/latency.md @@ -0,0 +1,3 @@ +This is a container command for latency diagnostics commands. + +To see the list of available commands you can call `LATENCY HELP`.
\ No newline at end of file diff --git a/iredis/data/commands/lcs.md b/iredis/data/commands/lcs.md new file mode 100644 index 0000000..b554686 --- /dev/null +++ b/iredis/data/commands/lcs.md @@ -0,0 +1,79 @@ + +The LCS command implements the longest common subsequence algorithm. Note that this is different than the longest common string algorithm, since matching characters in the string does not need to be contiguous. + +For instance the LCS between "foo" and "fao" is "fo", since scanning the two strings from left to right, the longest common set of characters is composed of the first "f" and then the "o". + +LCS is very useful in order to evaluate how similar two strings are. Strings can represent many things. For instance if two strings are DNA sequences, the LCS will provide a measure of similarity between the two DNA sequences. If the strings represent some text edited by some user, the LCS could represent how different the new text is compared to the old one, and so forth. + +Note that this algorithm runs in `O(N*M)` time, where N is the length of the first string and M is the length of the second string. So either spin a different Redis instance in order to run this algorithm, or make sure to run it against very small strings. + +``` +> MSET key1 ohmytext key2 mynewtext +OK +> LCS key1 key2 +"mytext" +``` + +Sometimes we need just the length of the match: + +``` +> LCS key1 key2 LEN +(integer) 6 +``` + +However what is often very useful, is to know the match position in each strings: + +``` +> LCS key1 key2 IDX +1) "matches" +2) 1) 1) 1) (integer) 4 + 2) (integer) 7 + 2) 1) (integer) 5 + 2) (integer) 8 + 2) 1) 1) (integer) 2 + 2) (integer) 3 + 2) 1) (integer) 0 + 2) (integer) 1 +3) "len" +4) (integer) 6 +``` + +Matches are produced from the last one to the first one, since this is how +the algorithm works, and it more efficient to emit things in the same order. 
+The above array means that the first match (second element of the array) +is between positions 2-3 of the first string and 0-1 of the second. +Then there is another match between 4-7 and 5-8. + +To restrict the list of matches to the ones of a given minimal length: + +``` +> LCS key1 key2 IDX MINMATCHLEN 4 +1) "matches" +2) 1) 1) 1) (integer) 4 + 2) (integer) 7 + 2) 1) (integer) 5 + 2) (integer) 8 +3) "len" +4) (integer) 6 +``` + +Finally to also have the match len: + +``` +> LCS key1 key2 IDX MINMATCHLEN 4 WITHMATCHLEN +1) "matches" +2) 1) 1) 1) (integer) 4 + 2) (integer) 7 + 2) 1) (integer) 5 + 2) (integer) 8 + 3) (integer) 4 +3) "len" +4) (integer) 6 +``` + +@return + +* Without modifiers the string representing the longest common substring is returned. +* When `LEN` is given the command returns the length of the longest common substring. +* When `IDX` is given the command returns an array with the LCS length and all the ranges in both the strings, start and end offset for each string, where there are matches. When `WITHMATCHLEN` is given each array representing a match will also have the length of the match (see examples). + diff --git a/iredis/data/commands/lindex.md b/iredis/data/commands/lindex.md index c0f6ac3..229c63d 100644 --- a/iredis/data/commands/lindex.md +++ b/iredis/data/commands/lindex.md @@ -1,15 +1,15 @@ -Returns the element at index `index` in the list stored at `key`. The index is -zero-based, so `0` means the first element, `1` the second element and so on. +Returns the element at index `index` in the list stored at `key`. +The index is zero-based, so `0` means the first element, `1` the second element +and so on. Negative indices can be used to designate elements starting at the tail of the -list. Here, `-1` means the last element, `-2` means the penultimate and so -forth. +list. +Here, `-1` means the last element, `-2` means the penultimate and so forth. When the value at `key` is not a list, an error is returned. 
@return -@bulk-string-reply: the requested element, or `nil` when `index` is out of -range. +@bulk-string-reply: the requested element, or `nil` when `index` is out of range. @examples diff --git a/iredis/data/commands/linsert.md b/iredis/data/commands/linsert.md index 6ff8060..9fe8f61 100644 --- a/iredis/data/commands/linsert.md +++ b/iredis/data/commands/linsert.md @@ -1,5 +1,5 @@ -Inserts `element` in the list stored at `key` either before or after the -reference value `pivot`. +Inserts `element` in the list stored at `key` either before or after the reference +value `pivot`. When `key` does not exist, it is considered an empty list and no operation is performed. diff --git a/iredis/data/commands/llen.md b/iredis/data/commands/llen.md index 17ac581..8c7c70f 100644 --- a/iredis/data/commands/llen.md +++ b/iredis/data/commands/llen.md @@ -1,6 +1,6 @@ -Returns the length of the list stored at `key`. If `key` does not exist, it is -interpreted as an empty list and `0` is returned. An error is returned when the -value stored at `key` is not a list. +Returns the length of the list stored at `key`. +If `key` does not exist, it is interpreted as an empty list and `0` is returned. +An error is returned when the value stored at `key` is not a list. @return diff --git a/iredis/data/commands/lmove.md b/iredis/data/commands/lmove.md index 9100315..ec62ced 100644 --- a/iredis/data/commands/lmove.md +++ b/iredis/data/commands/lmove.md @@ -1,17 +1,19 @@ Atomically returns and removes the first/last element (head/tail depending on -the `wherefrom` argument) of the list stored at `source`, and pushes the element -at the first/last element (head/tail depending on the `whereto` argument) of the -list stored at `destination`. +the `wherefrom` argument) of the list stored at `source`, and pushes the +element at the first/last element (head/tail depending on the `whereto` +argument) of the list stored at `destination`. 
For example: consider `source` holding the list `a,b,c`, and `destination` -holding the list `x,y,z`. Executing `LMOVE source destination RIGHT LEFT` -results in `source` holding `a,b` and `destination` holding `c,x,y,z`. +holding the list `x,y,z`. +Executing `LMOVE source destination RIGHT LEFT` results in `source` holding +`a,b` and `destination` holding `c,x,y,z`. If `source` does not exist, the value `nil` is returned and no operation is -performed. If `source` and `destination` are the same, the operation is -equivalent to removing the first/last element from the list and pushing it as -first/last element of the list, so it can be considered as a list rotation -command (or a no-op if `wherefrom` is the same as `whereto`). +performed. +If `source` and `destination` are the same, the operation is equivalent to +removing the first/last element from the list and pushing it as first/last +element of the list, so it can be considered as a list rotation command (or a +no-op if `wherefrom` is the same as `whereto`). This command comes in place of the now deprecated `RPOPLPUSH`. Doing `LMOVE RIGHT LEFT` is equivalent. @@ -35,19 +37,21 @@ LRANGE myotherlist 0 -1 ## Pattern: Reliable queue Redis is often used as a messaging server to implement processing of background -jobs or other kinds of messaging tasks. A simple form of queue is often obtained -pushing values into a list in the producer side, and waiting for this values in -the consumer side using `RPOP` (using polling), or `BRPOP` if the client is -better served by a blocking operation. +jobs or other kinds of messaging tasks. +A simple form of queue is often obtained pushing values into a list in the +producer side, and waiting for this values in the consumer side using `RPOP` +(using polling), or `BRPOP` if the client is better served by a blocking +operation. 
-However in this context the obtained queue is not _reliable_ as messages can be -lost, for example in the case there is a network problem or if the consumer +However in this context the obtained queue is not _reliable_ as messages can +be lost, for example in the case there is a network problem or if the consumer crashes just after the message is received but it is still to process. -`LMOVE` (or `BLMOVE` for the blocking variant) offers a way to avoid this -problem: the consumer fetches the message and at the same time pushes it into a -_processing_ list. It will use the `LREM` command in order to remove the message -from the _processing_ list once the message has been processed. +`LMOVE` (or `BLMOVE` for the blocking variant) offers a way to avoid +this problem: the consumer fetches the message and at the same time pushes it +into a _processing_ list. +It will use the `LREM` command in order to remove the message from the +_processing_ list once the message has been processed. An additional client may monitor the _processing_ list for items that remain there for too much time, and will push those timed out items into the queue @@ -55,22 +59,22 @@ again if needed. ## Pattern: Circular list -Using `LMOVE` with the same source and destination key, a client can visit all -the elements of an N-elements list, one after the other, in O(N) without +Using `LMOVE` with the same source and destination key, a client can visit +all the elements of an N-elements list, one after the other, in O(N) without transferring the full list from the server to the client using a single `LRANGE` operation. The above pattern works even if the following two conditions: -- There are multiple clients rotating the list: they'll fetch different +* There are multiple clients rotating the list: they'll fetch different elements, until all the elements of the list are visited, and the process restarts. -- Even if other clients are actively pushing new items at the end of the list. 
+* Even if other clients are actively pushing new items at the end of the list. The above makes it very simple to implement a system where a set of items must -be processed by N workers continuously as fast as possible. An example is a -monitoring system that must check that a set of web sites are reachable, with -the smallest delay possible, using a number of parallel workers. +be processed by N workers continuously as fast as possible. +An example is a monitoring system that must check that a set of web sites are +reachable, with the smallest delay possible, using a number of parallel workers. Note that this implementation of workers is trivially scalable and reliable, because even if a message is lost the item is still in the queue and will be diff --git a/iredis/data/commands/lmpop.md b/iredis/data/commands/lmpop.md new file mode 100644 index 0000000..aad5b6a --- /dev/null +++ b/iredis/data/commands/lmpop.md @@ -0,0 +1,35 @@ +Pops one or more elements from the first non-empty list key from the list of provided key names. + +`LMPOP` and `BLMPOP` are similar to the following, more limited, commands: + +- `LPOP` or `RPOP` which take only one key, and can return multiple elements. +- `BLPOP` or `BRPOP` which take multiple keys, but return only one element from just one key. + +See `BLMPOP` for the blocking variant of this command. + +Elements are popped from either the left or right of the first non-empty list based on the passed argument. +The number of returned elements is limited to the lower between the non-empty list's length, and the count argument (which defaults to 1). + +@return + +@array-reply: specifically: + +* A `nil` when no element could be popped. +* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of elements. 
+ +@examples + +```cli +LMPOP 2 non1 non2 LEFT COUNT 10 +LPUSH mylist "one" "two" "three" "four" "five" +LMPOP 1 mylist LEFT +LRANGE mylist 0 -1 +LMPOP 1 mylist RIGHT COUNT 10 +LPUSH mylist "one" "two" "three" "four" "five" +LPUSH mylist2 "a" "b" "c" "d" "e" +LMPOP 2 mylist mylist2 right count 3 +LRANGE mylist 0 -1 +LMPOP 2 mylist mylist2 right count 5 +LMPOP 2 mylist mylist2 right count 10 +EXISTS mylist mylist2 +``` diff --git a/iredis/data/commands/lolwut.md b/iredis/data/commands/lolwut.md index c19567c..a767a38 100644 --- a/iredis/data/commands/lolwut.md +++ b/iredis/data/commands/lolwut.md @@ -1,7 +1,7 @@ -The LOLWUT command displays the Redis version: however as a side effect of doing -so, it also creates a piece of generative computer art that is different with -each version of Redis. The command was introduced in Redis 5 and announced with -this [blog post](http://antirez.com/news/123). +The LOLWUT command displays the Redis version: however as a side effect of +doing so, it also creates a piece of generative computer art that is different +with each version of Redis. The command was introduced in Redis 5 and announced +with this [blog post](http://antirez.com/news/123). By default the `LOLWUT` command will display the piece corresponding to the current Redis version, however it is possible to display a specific version @@ -9,28 +9,21 @@ using the following form: LOLWUT VERSION 5 ... other optional arguments ... -Of course the "5" above is an example. Each LOLWUT version takes a different set -of arguments in order to change the output. The user is encouraged to play with -it to discover how the output changes adding more numerical arguments. +Of course the "5" above is an example. Each LOLWUT version takes a different +set of arguments in order to change the output. The user is encouraged to +play with it to discover how the output changes adding more numerical +arguments. 
LOLWUT wants to be a reminder that there is more in programming than just -putting some code together in order to create something useful. Every LOLWUT -version should have the following properties: +putting some code together in order to create something useful. Every +LOLWUT version should have the following properties: -1. It should display some computer art. There are no limits as long as the - output works well in a normal terminal display. However the output should not - be limited to graphics (like LOLWUT 5 and 6 actually do), but can be - generative poetry and other non graphical things. -2. LOLWUT output should be completely useless. Displaying some useful Redis - internal metrics does not count as a valid LOLWUT. -3. LOLWUT output should be fast to generate so that the command can be called in - production instances without issues. It should remain fast even when the user - experiments with odd parameters. -4. LOLWUT implementations should be safe and carefully checked for security, and - resist to untrusted inputs if they take arguments. +1. It should display some computer art. There are no limits as long as the output works well in a normal terminal display. However the output should not be limited to graphics (like LOLWUT 5 and 6 actually do), but can be generative poetry and other non graphical things. +2. LOLWUT output should be completely useless. Displaying some useful Redis internal metrics does not count as a valid LOLWUT. +3. LOLWUT output should be fast to generate so that the command can be called in production instances without issues. It should remain fast even when the user experiments with odd parameters. +4. LOLWUT implementations should be safe and carefully checked for security, and resist to untrusted inputs if they take arguments. 5. LOLWUT must always display the Redis version at the end. 
@return -@bulk-string-reply (or verbatim reply when using the RESP3 protocol): the string -containing the generative computer art, and a text with the Redis version. +@bulk-string-reply (or verbatim reply when using the RESP3 protocol): the string containing the generative computer art, and a text with the Redis version. diff --git a/iredis/data/commands/lpop.md b/iredis/data/commands/lpop.md index f870112..c6e77c2 100644 --- a/iredis/data/commands/lpop.md +++ b/iredis/data/commands/lpop.md @@ -8,17 +8,12 @@ to `count` elements, depending on the list's length. When called without the `count` argument: -@bulk-string-reply: the value of the first element, or `nil` when `key` does not -exist. +@bulk-string-reply: the value of the first element, or `nil` when `key` does not exist. When called with the `count` argument: @array-reply: list of popped elements, or `nil` when `key` does not exist. -@history - -- `>= 6.2`: Added the `count` argument. - @examples ```cli diff --git a/iredis/data/commands/lpos.md b/iredis/data/commands/lpos.md index 77516e1..93fe579 100644 --- a/iredis/data/commands/lpos.md +++ b/iredis/data/commands/lpos.md @@ -1,8 +1,6 @@ -The command returns the index of matching elements inside a Redis list. By -default, when no options are given, it will scan the list from head to tail, -looking for the first match of "element". If the element is found, its index -(the zero-based position in the list) is returned. Otherwise, if no match is -found, `nil` is returned. +The command returns the index of matching elements inside a Redis list. +By default, when no options are given, it will scan the list from head to tail, +looking for the first match of "element". If the element is found, its index (the zero-based position in the list) is returned. Otherwise, if no match is found, `nil` is returned. ``` > RPUSH mylist a b c 1 2 3 c c @@ -10,85 +8,58 @@ found, `nil` is returned. 2 ``` -The optional arguments and options can modify the command's behavior. 
The `RANK` -option specifies the "rank" of the first element to return, in case there are -multiple matches. A rank of 1 means to return the first match, 2 to return the -second match, and so forth. +The optional arguments and options can modify the command's behavior. +The `RANK` option specifies the "rank" of the first element to return, in case there are multiple matches. A rank of 1 means to return the first match, 2 to return the second match, and so forth. -For instance, in the above example the element "c" is present multiple times, if -I want the index of the second match, I'll write: +For instance, in the above example the element "c" is present multiple times, if I want the index of the second match, I'll write: ``` > LPOS mylist c RANK 2 6 ``` -That is, the second occurrence of "c" is at position 6. A negative "rank" as the -`RANK` argument tells `LPOS` to invert the search direction, starting from the -tail to the head. +That is, the second occurrence of "c" is at position 6. +A negative "rank" as the `RANK` argument tells `LPOS` to invert the search direction, starting from the tail to the head. -So, we want to say, give me the first element starting from the tail of the -list: +So, we want to say, give me the first element starting from the tail of the list: ``` > LPOS mylist c RANK -1 7 ``` -Note that the indexes are still reported in the "natural" way, that is, -considering the first element starting from the head of the list at index 0, the -next element at index 1, and so forth. This basically means that the returned -indexes are stable whatever the rank is positive or negative. +Note that the indexes are still reported in the "natural" way, that is, considering the first element starting from the head of the list at index 0, the next element at index 1, and so forth. This basically means that the returned indexes are stable whatever the rank is positive or negative. 
-Sometimes we want to return not just the Nth matching element, but the position -of all the first N matching elements. This can be achieved using the `COUNT` -option. +Sometimes we want to return not just the Nth matching element, but the position of all the first N matching elements. This can be achieved using the `COUNT` option. ``` > LPOS mylist c COUNT 2 [2,6] ``` -We can combine `COUNT` and `RANK`, so that `COUNT` will try to return up to the -specified number of matches, but starting from the Nth match, as specified by -the `RANK` option. +We can combine `COUNT` and `RANK`, so that `COUNT` will try to return up to the specified number of matches, but starting from the Nth match, as specified by the `RANK` option. ``` > LPOS mylist c RANK -1 COUNT 2 [7,6] ``` -When `COUNT` is used, it is possible to specify 0 as the number of matches, as a -way to tell the command we want all the matches found returned as an array of -indexes. This is better than giving a very large `COUNT` option because it is -more general. +When `COUNT` is used, it is possible to specify 0 as the number of matches, as a way to tell the command we want all the matches found returned as an array of indexes. This is better than giving a very large `COUNT` option because it is more general. ``` > LPOS mylist c COUNT 0 [2,6,7] ``` -When `COUNT` is used and no match is found, an empty array is returned. However -when `COUNT` is not used and there are no matches, the command returns `nil`. +When `COUNT` is used and no match is found, an empty array is returned. However when `COUNT` is not used and there are no matches, the command returns `nil`. -Finally, the `MAXLEN` option tells the command to compare the provided element -only with a given maximum number of list items. 
So for instance specifying -`MAXLEN 1000` will make sure that the command performs only 1000 comparisons, -effectively running the algorithm on a subset of the list (the first part or the -last part depending on the fact we use a positive or negative rank). This is -useful to limit the maximum complexity of the command. It is also useful when we -expect the match to be found very early, but want to be sure that in case this -is not true, the command does not take too much time to run. +Finally, the `MAXLEN` option tells the command to compare the provided element only with a given maximum number of list items. So for instance specifying `MAXLEN 1000` will make sure that the command performs only 1000 comparisons, effectively running the algorithm on a subset of the list (the first part or the last part depending on the fact we use a positive or negative rank). This is useful to limit the maximum complexity of the command. It is also useful when we expect the match to be found very early, but want to be sure that in case this is not true, the command does not take too much time to run. -When `MAXLEN` is used, it is possible to specify 0 as the maximum number of -comparisons, as a way to tell the command we want unlimited comparisons. This is -better than giving a very large `MAXLEN` option because it is more general. +When `MAXLEN` is used, it is possible to specify 0 as the maximum number of comparisons, as a way to tell the command we want unlimited comparisons. This is better than giving a very large `MAXLEN` option because it is more general. @return -The command returns the integer representing the matching element, or `nil` if -there is no match. However, if the `COUNT` option is given the command returns -an array (empty if there are no matches). +The command returns the integer representing the matching element, or `nil` if there is no match. However, if the `COUNT` option is given the command returns an array (empty if there are no matches). 
@examples diff --git a/iredis/data/commands/lpush.md b/iredis/data/commands/lpush.md index 2140857..e8b9720 100644 --- a/iredis/data/commands/lpush.md +++ b/iredis/data/commands/lpush.md @@ -1,23 +1,19 @@ -Insert all the specified values at the head of the list stored at `key`. If -`key` does not exist, it is created as empty list before performing the push -operations. When `key` holds a value that is not a list, an error is returned. +Insert all the specified values at the head of the list stored at `key`. +If `key` does not exist, it is created as empty list before performing the push +operations. +When `key` holds a value that is not a list, an error is returned. It is possible to push multiple elements using a single command call just -specifying multiple arguments at the end of the command. Elements are inserted -one after the other to the head of the list, from the leftmost element to the -rightmost element. So for instance the command `LPUSH mylist a b c` will result -into a list containing `c` as first element, `b` as second element and `a` as -third element. +specifying multiple arguments at the end of the command. +Elements are inserted one after the other to the head of the list, from the +leftmost element to the rightmost element. +So for instance the command `LPUSH mylist a b c` will result into a list +containing `c` as first element, `b` as second element and `a` as third element. @return @integer-reply: the length of the list after the push operations. -@history - -- `>= 2.4`: Accepts multiple `element` arguments. In Redis versions older than - 2.4 it was possible to push a single value per command. - @examples ```cli diff --git a/iredis/data/commands/lpushx.md b/iredis/data/commands/lpushx.md index 4cc505f..e98c903 100644 --- a/iredis/data/commands/lpushx.md +++ b/iredis/data/commands/lpushx.md @@ -1,16 +1,12 @@ Inserts specified values at the head of the list stored at `key`, only if `key` -already exists and holds a list. 
In contrary to `LPUSH`, no operation will be -performed when `key` does not yet exist. +already exists and holds a list. +In contrary to `LPUSH`, no operation will be performed when `key` does not yet +exist. @return @integer-reply: the length of the list after the push operation. -@history - -- `>= 4.0`: Accepts multiple `element` arguments. In Redis versions older than - 4.0 it was possible to push a single value per command. - @examples ```cli diff --git a/iredis/data/commands/lrange.md b/iredis/data/commands/lrange.md index 923b542..7634f3e 100644 --- a/iredis/data/commands/lrange.md +++ b/iredis/data/commands/lrange.md @@ -1,24 +1,27 @@ -Returns the specified elements of the list stored at `key`. The offsets `start` -and `stop` are zero-based indexes, with `0` being the first element of the list -(the head of the list), `1` being the next element and so on. +Returns the specified elements of the list stored at `key`. +The offsets `start` and `stop` are zero-based indexes, with `0` being the first +element of the list (the head of the list), `1` being the next element and so +on. These offsets can also be negative numbers indicating offsets starting at the -end of the list. For example, `-1` is the last element of the list, `-2` the -penultimate, and so on. +end of the list. +For example, `-1` is the last element of the list, `-2` the penultimate, and so +on. ## Consistency with range functions in various programming languages Note that if you have a list of numbers from 0 to 100, `LRANGE list 0 10` will -return 11 elements, that is, the rightmost item is included. This **may or may -not** be consistent with behavior of range-related functions in your programming -language of choice (think Ruby's `Range.new`, `Array#slice` or Python's -`range()` function). +return 11 elements, that is, the rightmost item is included. 
+This **may or may not** be consistent with behavior of range-related functions +in your programming language of choice (think Ruby's `Range.new`, `Array#slice` +or Python's `range()` function). ## Out-of-range indexes -Out of range indexes will not produce an error. If `start` is larger than the -end of the list, an empty list is returned. If `stop` is larger than the actual -end of the list, Redis will treat it like the last element of the list. +Out of range indexes will not produce an error. +If `start` is larger than the end of the list, an empty list is returned. +If `stop` is larger than the actual end of the list, Redis will treat it like +the last element of the list. @return diff --git a/iredis/data/commands/lrem.md b/iredis/data/commands/lrem.md index c06dda7..36c0c7d 100644 --- a/iredis/data/commands/lrem.md +++ b/iredis/data/commands/lrem.md @@ -1,10 +1,10 @@ -Removes the first `count` occurrences of elements equal to `element` from the -list stored at `key`. The `count` argument influences the operation in the -following ways: +Removes the first `count` occurrences of elements equal to `element` from the list +stored at `key`. +The `count` argument influences the operation in the following ways: -- `count > 0`: Remove elements equal to `element` moving from head to tail. -- `count < 0`: Remove elements equal to `element` moving from tail to head. -- `count = 0`: Remove all elements equal to `element`. +* `count > 0`: Remove elements equal to `element` moving from head to tail. +* `count < 0`: Remove elements equal to `element` moving from tail to head. +* `count = 0`: Remove all elements equal to `element`. For example, `LREM list -2 "hello"` will remove the last two occurrences of `"hello"` in the list stored at `list`. 
diff --git a/iredis/data/commands/lset.md b/iredis/data/commands/lset.md index fad99ce..8f1c391 100644 --- a/iredis/data/commands/lset.md +++ b/iredis/data/commands/lset.md @@ -1,5 +1,5 @@ -Sets the list element at `index` to `element`. For more information on the -`index` argument, see `LINDEX`. +Sets the list element at `index` to `element`. +For more information on the `index` argument, see `LINDEX`. An error is returned for out of range indexes. diff --git a/iredis/data/commands/ltrim.md b/iredis/data/commands/ltrim.md index fd7fca5..7cae0c7 100644 --- a/iredis/data/commands/ltrim.md +++ b/iredis/data/commands/ltrim.md @@ -1,6 +1,7 @@ Trim an existing list so that it will contain only the specified range of -elements specified. Both `start` and `stop` are zero-based indexes, where `0` is -the first element of the list (the head), `1` the next element and so on. +elements specified. +Both `start` and `stop` are zero-based indexes, where `0` is the first element +of the list (the head), `1` the next element and so on. For example: `LTRIM foobar 0 2` will modify the list stored at `foobar` so that only the first three elements of the list will remain. @@ -11,10 +12,12 @@ element and so on. Out of range indexes will not produce an error: if `start` is larger than the end of the list, or `start > end`, the result will be an empty list (which -causes `key` to be removed). If `end` is larger than the end of the list, Redis -will treat it like the last element of the list. +causes `key` to be removed). +If `end` is larger than the end of the list, Redis will treat it like the last +element of the list. -A common use of `LTRIM` is together with `LPUSH` / `RPUSH`. For example: +A common use of `LTRIM` is together with `LPUSH` / `RPUSH`. +For example: ``` LPUSH mylist someelement @@ -22,10 +25,11 @@ LTRIM mylist 0 99 ``` This pair of commands will push a new element on the list, while making sure -that the list will not grow larger than 100 elements. 
This is very useful when -using Redis to store logs for example. It is important to note that when used in -this way `LTRIM` is an O(1) operation because in the average case just one -element is removed from the tail of the list. +that the list will not grow larger than 100 elements. +This is very useful when using Redis to store logs for example. +It is important to note that when used in this way `LTRIM` is an O(1) operation +because in the average case just one element is removed from the tail of the +list. @return diff --git a/iredis/data/commands/memory-doctor.md b/iredis/data/commands/memory-doctor.md index 0c9a172..dbb9db3 100644 --- a/iredis/data/commands/memory-doctor.md +++ b/iredis/data/commands/memory-doctor.md @@ -3,4 +3,4 @@ the Redis server experiences, and advises about possible remedies. @return -@bulk-string-reply +@bulk-string-reply
\ No newline at end of file diff --git a/iredis/data/commands/memory-stats.md b/iredis/data/commands/memory-stats.md index 4820c6a..39cd68e 100644 --- a/iredis/data/commands/memory-stats.md +++ b/iredis/data/commands/memory-stats.md @@ -4,47 +4,43 @@ server. The information about memory usage is provided as metrics and their respective values. The following metrics are reported: -- `peak.allocated`: Peak memory consumed by Redis in bytes (see `INFO`'s - `used_memory_peak`) -- `total.allocated`: Total number of bytes allocated by Redis using its - allocator (see `INFO`'s `used_memory`) -- `startup.allocated`: Initial amount of memory consumed by Redis at startup in - bytes (see `INFO`'s `used_memory_startup`) -- `replication.backlog`: Size in bytes of the replication backlog (see `INFO`'s - `repl_backlog_active`) -- `clients.slaves`: The total size in bytes of all replicas overheads (output - and query buffers, connection contexts) -- `clients.normal`: The total size in bytes of all clients overheads (output and - query buffers, connection contexts) -- `aof.buffer`: The summed size in bytes of the current and rewrite AOF buffers - (see `INFO`'s `aof_buffer_length` and `aof_rewrite_buffer_length`, - respectively) -- `lua.caches`: the summed size in bytes of the overheads of the Lua scripts' - caches -- `dbXXX`: For each of the server's databases, the overheads of the main and - expiry dictionaries (`overhead.hashtable.main` and - `overhead.hashtable.expires`, respectively) are reported in bytes -- `overhead.total`: The sum of all overheads, i.e. 
`startup.allocated`, - `replication.backlog`, `clients.slaves`, `clients.normal`, `aof.buffer` and - those of the internal data structures that are used in managing the Redis - keyspace (see `INFO`'s `used_memory_overhead`) -- `keys.count`: The total number of keys stored across all databases in the - server -- `keys.bytes-per-key`: The ratio between **net memory usage** - (`total.allocated` minus `startup.allocated`) and `keys.count` -- `dataset.bytes`: The size in bytes of the dataset, i.e. `overhead.total` - subtracted from `total.allocated` (see `INFO`'s `used_memory_dataset`) -- `dataset.percentage`: The percentage of `dataset.bytes` out of the net memory - usage -- `peak.percentage`: The percentage of `peak.allocated` out of `total.allocated` -- `fragmentation`: See `INFO`'s `mem_fragmentation_ratio` +* `peak.allocated`: Peak memory consumed by Redis in bytes (see `INFO`'s + `used_memory_peak`) +* `total.allocated`: Total number of bytes allocated by Redis using its + allocator (see `INFO`'s `used_memory`) +* `startup.allocated`: Initial amount of memory consumed by Redis at startup + in bytes (see `INFO`'s `used_memory_startup`) +* `replication.backlog`: Size in bytes of the replication backlog (see + `INFO`'s `repl_backlog_active`) +* `clients.slaves`: The total size in bytes of all replicas overheads (output + and query buffers, connection contexts) +* `clients.normal`: The total size in bytes of all clients overheads (output + and query buffers, connection contexts) +* `cluster.links`: Memory usage by cluster links (Added in Redis 7.0, see `INFO`'s `mem_cluster_links`). +* `aof.buffer`: The summed size in bytes of AOF related buffers. 
+* `lua.caches`: the summed size in bytes of the overheads of the Lua scripts' + caches +* `dbXXX`: For each of the server's databases, the overheads of the main and + expiry dictionaries (`overhead.hashtable.main` and + `overhead.hashtable.expires`, respectively) are reported in bytes +* `overhead.total`: The sum of all overheads, i.e. `startup.allocated`, + `replication.backlog`, `clients.slaves`, `clients.normal`, `aof.buffer` and + those of the internal data structures that are used in managing the + Redis keyspace (see `INFO`'s `used_memory_overhead`) +* `keys.count`: The total number of keys stored across all databases in the + server +* `keys.bytes-per-key`: The ratio between **net memory usage** (`total.allocated` + minus `startup.allocated`) and `keys.count` +* `dataset.bytes`: The size in bytes of the dataset, i.e. `overhead.total` + subtracted from `total.allocated` (see `INFO`'s `used_memory_dataset`) +* `dataset.percentage`: The percentage of `dataset.bytes` out of the net + memory usage +* `peak.percentage`: The percentage of `peak.allocated` out of + `total.allocated` +* `fragmentation`: See `INFO`'s `mem_fragmentation_ratio` @return @array-reply: nested list of memory usage metrics and their values -**A note about the word slave used in this man page**: Starting with Redis 5, if -not for backward compatibility, the Redis project no longer uses the word slave. -Unfortunately in this command the word slave is part of the protocol, so we'll -be able to remove such occurrences only when this API will be naturally -deprecated. +**A note about the word slave used in this man page**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. 
diff --git a/iredis/data/commands/memory-usage.md b/iredis/data/commands/memory-usage.md index 22e2740..ae5a4bc 100644 --- a/iredis/data/commands/memory-usage.md +++ b/iredis/data/commands/memory-usage.md @@ -6,7 +6,7 @@ administrative overheads that a key its value require. For nested data types, the optional `SAMPLES` option can be provided, where `count` is the number of sampled nested values. By default, this option is set -to `5`. To sample the all of the nested values, use `SAMPLES 0`. +to `5`. To sample the all of the nested values, use `SAMPLES 0`. @examples diff --git a/iredis/data/commands/memory.md b/iredis/data/commands/memory.md new file mode 100644 index 0000000..46bde8d --- /dev/null +++ b/iredis/data/commands/memory.md @@ -0,0 +1,3 @@ +This is a container command for memory introspection and management commands. + +To see the list of available commands you can call `MEMORY HELP`. diff --git a/iredis/data/commands/mget.md b/iredis/data/commands/mget.md index 130f935..8bca6ca 100644 --- a/iredis/data/commands/mget.md +++ b/iredis/data/commands/mget.md @@ -1,6 +1,7 @@ -Returns the values of all specified keys. For every key that does not hold a -string value or does not exist, the special value `nil` is returned. Because of -this, the operation never fails. +Returns the values of all specified keys. +For every key that does not hold a string value or does not exist, the special +value `nil` is returned. +Because of this, the operation never fails. @return diff --git a/iredis/data/commands/migrate.md b/iredis/data/commands/migrate.md index 096f277..318e0e2 100644 --- a/iredis/data/commands/migrate.md +++ b/iredis/data/commands/migrate.md @@ -1,32 +1,33 @@ Atomically transfer a key from a source Redis instance to a destination Redis -instance. On success the key is deleted from the original instance and is -guaranteed to exist in the target instance. +instance. 
+On success the key is deleted from the original instance and is guaranteed to +exist in the target instance. The command is atomic and blocks the two instances for the time required to transfer the key, at any given time the key will appear to exist in a given instance or in the other instance, unless a timeout error occurs. In 3.2 and above, multiple keys can be pipelined in a single call to `MIGRATE` by passing -the empty string ("") as key and adding the `KEYS` clause. +the empty string ("") as key and adding the `!KEYS` clause. The command internally uses `DUMP` to generate the serialized version of the key -value, and `RESTORE` in order to synthesize the key in the target instance. The -source instance acts as a client for the target instance. If the target instance -returns OK to the `RESTORE` command, the source instance deletes the key using -`DEL`. +value, and `RESTORE` in order to synthesize the key in the target instance. +The source instance acts as a client for the target instance. +If the target instance returns OK to the `RESTORE` command, the source instance +deletes the key using `DEL`. The timeout specifies the maximum idle time in any moment of the communication -with the destination instance in milliseconds. This means that the operation -does not need to be completed within the specified amount of milliseconds, but -that the transfer should make progresses without blocking for more than the -specified amount of milliseconds. +with the destination instance in milliseconds. +This means that the operation does not need to be completed within the specified +amount of milliseconds, but that the transfer should make progresses without +blocking for more than the specified amount of milliseconds. `MIGRATE` needs to perform I/O operations and to honor the specified timeout. When there is an I/O error during the transfer or if the timeout is reached the -operation is aborted and the special error - `IOERR` returned. 
When this happens -the following two cases are possible: +operation is aborted and the special error - `IOERR` returned. +When this happens the following two cases are possible: -- The key may be on both the instances. -- The key may be only in the source instance. +* The key may be on both the instances. +* The key may be only in the source instance. It is not possible for the key to get lost in the event of a timeout, but the client calling `MIGRATE`, in the event of a timeout error, should check if the @@ -38,44 +39,34 @@ same name was also _already_ present on the target instance). If there are no keys to migrate in the source instance `NOKEY` is returned. Because missing keys are possible in normal conditions, from expiry for example, -`NOKEY` isn't an error. +`NOKEY` isn't an error. ## Migrating multiple keys with a single command call -Starting with Redis 3.0.6 `MIGRATE` supports a new bulk-migration mode that uses -pipelining in order to migrate multiple keys between instances without incurring -in the round trip time latency and other overheads that there are when moving -each key with a single `MIGRATE` call. +Starting with Redis 3.0.6 `MIGRATE` supports a new bulk-migration mode that +uses pipelining in order to migrate multiple keys between instances without +incurring in the round trip time latency and other overheads that there are +when moving each key with a single `MIGRATE` call. -In order to enable this form, the `KEYS` option is used, and the normal _key_ -argument is set to an empty string. The actual key names will be provided after -the `KEYS` argument itself, like in the following example: +In order to enable this form, the `!KEYS` option is used, and the normal *key* +argument is set to an empty string. 
The actual key names will be provided +after the `!KEYS` argument itself, like in the following example: MIGRATE 192.168.1.34 6379 "" 0 5000 KEYS key1 key2 key3 -When this form is used the `NOKEY` status code is only returned when none of the -keys is present in the instance, otherwise the command is executed, even if just -a single key exists. +When this form is used the `NOKEY` status code is only returned when none +of the keys is present in the instance, otherwise the command is executed, even if +just a single key exists. ## Options -- `COPY` -- Do not remove the key from the local instance. -- `REPLACE` -- Replace existing key on the remote instance. -- `KEYS` -- If the key argument is an empty string, the command will instead - migrate all the keys that follow the `KEYS` option (see the above section for - more info). -- `AUTH` -- Authenticate with the given password to the remote instance. -- `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or - greater ACL auth style). - -@history - -- `>= 3.0.0`: Added the `COPY` and `REPLACE` options. -- `>= 3.0.6`: Added the `KEYS` option. -- `>= 4.0.7`: Added the `AUTH` option. -- `>= 6.0.0`: Added the `AUTH2` option. +* `!COPY` -- Do not remove the key from the local instance. +* `REPLACE` -- Replace existing key on the remote instance. +* `!KEYS` -- If the key argument is an empty string, the command will instead migrate all the keys that follow the `!KEYS` option (see the above section for more info). +* `!AUTH` -- Authenticate with the given password to the remote instance. +* `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or greater ACL auth style). @return -@simple-string-reply: The command returns OK on success, or `NOKEY` if no keys -were found in the source instance. +@simple-string-reply: The command returns OK on success, or `NOKEY` if no keys were +found in the source instance. 
diff --git a/iredis/data/commands/module-help.md b/iredis/data/commands/module-help.md new file mode 100644 index 0000000..a05bf1e --- /dev/null +++ b/iredis/data/commands/module-help.md @@ -0,0 +1,5 @@ +The `MODULE HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/module-list.md b/iredis/data/commands/module-list.md index d951f23..1bfa3e2 100644 --- a/iredis/data/commands/module-list.md +++ b/iredis/data/commands/module-list.md @@ -6,5 +6,5 @@ Returns information about the modules loaded to the server. module, and is in itself a list of property names and their values. The following properties is reported for each loaded module: -- `name`: Name of the module -- `ver`: Version of the module +* `name`: Name of the module +* `ver`: Version of the module diff --git a/iredis/data/commands/module-loadex.md b/iredis/data/commands/module-loadex.md new file mode 100644 index 0000000..f7a55db --- /dev/null +++ b/iredis/data/commands/module-loadex.md @@ -0,0 +1,15 @@ +Loads a module from a dynamic library at runtime with configuration directives. + +This is an extended version of the `MODULE LOAD` command. + +It loads and initializes the Redis module from the dynamic library specified by the `path` argument. The `path` should be the absolute path of the library, including the full filename. + +You can use the optional `!CONFIG` argument to provide the module with configuration directives. +Any additional arguments that follow the `ARGS` keyword are passed unmodified to the module. + +**Note**: modules can also be loaded at server startup with `loadmodule` +configuration directive in `redis.conf`. + +@return + +@simple-string-reply: `OK` if module was loaded. 
diff --git a/iredis/data/commands/module-unload.md b/iredis/data/commands/module-unload.md index c5ce38e..84ebebf 100644 --- a/iredis/data/commands/module-unload.md +++ b/iredis/data/commands/module-unload.md @@ -6,7 +6,7 @@ library's filename. Known limitations: -- Modules that register custom data types can not be unloaded. +* Modules that register custom data types can not be unloaded. @return diff --git a/iredis/data/commands/module.md b/iredis/data/commands/module.md new file mode 100644 index 0000000..87fa539 --- /dev/null +++ b/iredis/data/commands/module.md @@ -0,0 +1,3 @@ +This is a container command for module management commands. + +To see the list of available commands you can call `MODULE HELP`. diff --git a/iredis/data/commands/monitor.md b/iredis/data/commands/monitor.md index 28cbeef..2622cdd 100644 --- a/iredis/data/commands/monitor.md +++ b/iredis/data/commands/monitor.md @@ -1,6 +1,7 @@ `MONITOR` is a debugging command that streams back every command processed by -the Redis server. It can help in understanding what is happening to the -database. This command can both be used via `redis-cli` and via `telnet`. +the Redis server. +It can help in understanding what is happening to the database. +This command can both be used via `redis-cli` and via `telnet`. The ability to see all the requests processed by the server is useful in order to spot bugs in an application both when using Redis as a database and as a @@ -12,8 +13,9 @@ $ redis-cli monitor 1339518087.877697 [0 127.0.0.1:60866] "dbsize" 1339518090.420270 [0 127.0.0.1:60866] "set" "x" "6" 1339518096.506257 [0 127.0.0.1:60866] "get" "x" -1339518099.363765 [0 127.0.0.1:60866] "del" "x" -1339518100.544926 [0 127.0.0.1:60866] "get" "x" +1339518099.363765 [0 127.0.0.1:60866] "eval" "return redis.call('set','x','7')" "0" +1339518100.363799 [0 lua] "set" "x" "7" +1339518100.544926 [0 127.0.0.1:60866] "del" "x" ``` Use `SIGINT` (Ctrl-C) to stop a `MONITOR` stream running via `redis-cli`. 
@@ -41,21 +43,16 @@ via `telnet`. ## Commands not logged by MONITOR -Because of security concerns, all administrative commands are not logged by -`MONITOR`'s output. +Because of security concerns, no administrative commands are logged +by `MONITOR`'s output and sensitive data is redacted in the command `AUTH`. -Furthermore, the following commands are also not logged: - -- `AUTH` -- `EXEC` -- `HELLO` -- `QUIT` +Furthermore, the command `QUIT` is also not logged. ## Cost of running MONITOR -Because `MONITOR` streams back **all** commands, its use comes at a cost. The -following (totally unscientific) benchmark numbers illustrate what the cost of -running `MONITOR` can be. +Because `MONITOR` streams back **all** commands, its use comes at a cost. +The following (totally unscientific) benchmark numbers illustrate what the cost +of running `MONITOR` can be. Benchmark result **without** `MONITOR` running: @@ -80,15 +77,16 @@ INCR: 41771.09 requests per second ``` In this particular case, running a single `MONITOR` client can reduce the -throughput by more than 50%. Running more `MONITOR` clients will reduce -throughput even more. +throughput by more than 50%. +Running more `MONITOR` clients will reduce throughput even more. @return **Non standard return value**, just dumps the received commands in an infinite flow. -@history +## Behavior change history -- `>= 6.2`: `RESET` can be called to exit monitor mode. -- `>= 6.0`: `AUTH` excluded from the command's output. +* `>= 6.0.0`: `AUTH` excluded from the command's output. +* `>= 6.2.0`: `RESET` can be called to exit monitor mode. +* `>= 6.2.4`: `AUTH`, `HELLO`, `EVAL`, `EVAL_RO`, `EVALSHA` and `EVALSHA_RO` included in the command's output.
\ No newline at end of file diff --git a/iredis/data/commands/move.md b/iredis/data/commands/move.md index e007a18..ceb212c 100644 --- a/iredis/data/commands/move.md +++ b/iredis/data/commands/move.md @@ -1,11 +1,12 @@ Move `key` from the currently selected database (see `SELECT`) to the specified -destination database. When `key` already exists in the destination database, or -it does not exist in the source database, it does nothing. It is possible to use -`MOVE` as a locking primitive because of this. +destination database. +When `key` already exists in the destination database, or it does not exist in +the source database, it does nothing. +It is possible to use `MOVE` as a locking primitive because of this. @return @integer-reply, specifically: -- `1` if `key` was moved. -- `0` if `key` was not moved. +* `1` if `key` was moved. +* `0` if `key` was not moved. diff --git a/iredis/data/commands/mset.md b/iredis/data/commands/mset.md index 9c17f86..f070d29 100644 --- a/iredis/data/commands/mset.md +++ b/iredis/data/commands/mset.md @@ -1,9 +1,10 @@ -Sets the given keys to their respective values. `MSET` replaces existing values -with new values, just as regular `SET`. See `MSETNX` if you don't want to -overwrite existing values. +Sets the given keys to their respective values. +`MSET` replaces existing values with new values, just as regular `SET`. +See `MSETNX` if you don't want to overwrite existing values. -`MSET` is atomic, so all given keys are set at once. It is not possible for -clients to see that some of the keys were updated while others are unchanged. +`MSET` is atomic, so all given keys are set at once. +It is not possible for clients to see that some of the keys were updated while +others are unchanged. 
@return diff --git a/iredis/data/commands/msetnx.md b/iredis/data/commands/msetnx.md index e332223..795bfc9 100644 --- a/iredis/data/commands/msetnx.md +++ b/iredis/data/commands/msetnx.md @@ -1,19 +1,21 @@ -Sets the given keys to their respective values. `MSETNX` will not perform any -operation at all even if just a single key already exists. +Sets the given keys to their respective values. +`MSETNX` will not perform any operation at all even if just a single key already +exists. Because of this semantic `MSETNX` can be used in order to set different keys -representing different fields of an unique logic object in a way that ensures +representing different fields of a unique logic object in a way that ensures that either all the fields or none at all are set. -`MSETNX` is atomic, so all given keys are set at once. It is not possible for -clients to see that some of the keys were updated while others are unchanged. +`MSETNX` is atomic, so all given keys are set at once. +It is not possible for clients to see that some of the keys were updated while +others are unchanged. @return @integer-reply, specifically: -- `1` if the all the keys were set. -- `0` if no key was set (at least one key already existed). +* `1` if all the keys were set. +* `0` if no key was set (at least one key already existed). @examples diff --git a/iredis/data/commands/multi.md b/iredis/data/commands/multi.md index 9ed46b6..dc87892 100644 --- a/iredis/data/commands/multi.md +++ b/iredis/data/commands/multi.md @@ -1,5 +1,5 @@ -Marks the start of a [transaction][tt] block. Subsequent commands will be queued -for atomic execution using `EXEC`. +Marks the start of a [transaction][tt] block. +Subsequent commands will be queued for atomic execution using `EXEC`. 
[tt]: /topics/transactions diff --git a/iredis/data/commands/object-encoding.md b/iredis/data/commands/object-encoding.md new file mode 100644 index 0000000..3a9583d --- /dev/null +++ b/iredis/data/commands/object-encoding.md @@ -0,0 +1,21 @@ +Returns the internal encoding for the Redis object stored at `<key>` + +Redis objects can be encoded in different ways: + +* Strings can be encoded as: + + - `raw`, normal string encoding. + - `int`, strings representing integers in a 64-bit signed interval, encoded in this way to save space. + - `embstr`, an embedded string, which is an object where the internal simple dynamic string, `sds`, is an unmodifiable string allocated in the same chunk as the object itself. + `embstr` can be strings with lengths up to the hardcoded limit of `OBJ_ENCODING_EMBSTR_SIZE_LIMIT` or 44 bytes. + +* Lists can be encoded as `ziplist` or `linkedlist`. The `ziplist` is the special representation that is used to save space for small lists. +* Sets can be encoded as `intset` or `hashtable`. The `intset` is a special encoding used for small sets composed solely of integers. +* Hashes can be encoded as `ziplist` or `hashtable`. The `ziplist` is a special encoding used for small hashes. +* Sorted Sets can be encoded as `ziplist` or `skiplist` format. As for the List type small sorted sets can be specially encoded using `ziplist`, while the `skiplist` encoding is the one that works with sorted sets of any size. + +All the specially encoded types are automatically converted to the general type once you perform an operation that makes it impossible for Redis to retain the space saving encoding. 
+ +@return + +@bulk-string-reply: the encoding of the object, or `nil` if the key doesn't exist diff --git a/iredis/data/commands/object-freq.md b/iredis/data/commands/object-freq.md new file mode 100644 index 0000000..fdf891e --- /dev/null +++ b/iredis/data/commands/object-freq.md @@ -0,0 +1,9 @@ +This command returns the logarithmic access frequency counter of a Redis object stored at `<key>`. + +The command is only available when the `maxmemory-policy` configuration directive is set to one of the LFU policies. + +@return + +@integer-reply + +The counter's value.
\ No newline at end of file diff --git a/iredis/data/commands/object-help.md b/iredis/data/commands/object-help.md new file mode 100644 index 0000000..f98196c --- /dev/null +++ b/iredis/data/commands/object-help.md @@ -0,0 +1,5 @@ +The `OBJECT HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/object-idletime.md b/iredis/data/commands/object-idletime.md new file mode 100644 index 0000000..2a89641 --- /dev/null +++ b/iredis/data/commands/object-idletime.md @@ -0,0 +1,9 @@ +This command returns the time in seconds since the last access to the value stored at `<key>`. + +The command is only available when the `maxmemory-policy` configuration directive is not set to one of the LFU policies. + +@return + +@integer-reply + +The idle time in seconds.
\ No newline at end of file diff --git a/iredis/data/commands/object-refcount.md b/iredis/data/commands/object-refcount.md new file mode 100644 index 0000000..639c899 --- /dev/null +++ b/iredis/data/commands/object-refcount.md @@ -0,0 +1,7 @@ +This command returns the reference count of the value stored at `<key>`. + +@return + +@integer-reply + +The number of references.
\ No newline at end of file diff --git a/iredis/data/commands/object.md b/iredis/data/commands/object.md index e5321f7..887ab9d 100644 --- a/iredis/data/commands/object.md +++ b/iredis/data/commands/object.md @@ -1,80 +1,3 @@ -The `OBJECT` command allows to inspect the internals of Redis Objects associated -with keys. It is useful for debugging or to understand if your keys are using -the specially encoded data types to save space. Your application may also use -the information reported by the `OBJECT` command to implement application level -key eviction policies when using Redis as a Cache. +This is a container command for object introspection commands. -The `OBJECT` command supports multiple sub commands: - -- `OBJECT REFCOUNT <key>` returns the number of references of the value - associated with the specified key. This command is mainly useful for - debugging. -- `OBJECT ENCODING <key>` returns the kind of internal representation used in - order to store the value associated with a key. -- `OBJECT IDLETIME <key>` returns the number of seconds since the object stored - at the specified key is idle (not requested by read or write operations). - While the value is returned in seconds the actual resolution of this timer is - 10 seconds, but may vary in future implementations. This subcommand is - available when `maxmemory-policy` is set to an LRU policy or `noeviction` and - `maxmemory` is set. -- `OBJECT FREQ <key>` returns the logarithmic access frequency counter of the - object stored at the specified key. This subcommand is available when - `maxmemory-policy` is set to an LFU policy. -- `OBJECT HELP` returns a succinct help text. - -Objects can be encoded in different ways: - -- Strings can be encoded as `raw` (normal string encoding) or `int` (strings - representing integers in a 64 bit signed interval are encoded in this way in - order to save space). -- Lists can be encoded as `ziplist` or `linkedlist`. 
The `ziplist` is the - special representation that is used to save space for small lists. -- Sets can be encoded as `intset` or `hashtable`. The `intset` is a special - encoding used for small sets composed solely of integers. -- Hashes can be encoded as `ziplist` or `hashtable`. The `ziplist` is a special - encoding used for small hashes. -- Sorted Sets can be encoded as `ziplist` or `skiplist` format. As for the List - type small sorted sets can be specially encoded using `ziplist`, while the - `skiplist` encoding is the one that works with sorted sets of any size. - -All the specially encoded types are automatically converted to the general type -once you perform an operation that makes it impossible for Redis to retain the -space saving encoding. - -@return - -Different return values are used for different subcommands. - -- Subcommands `refcount` and `idletime` return integers. -- Subcommand `encoding` returns a bulk reply. - -If the object you try to inspect is missing, a null bulk reply is returned. - -@examples - -``` -redis> lpush mylist "Hello World" -(integer) 4 -redis> object refcount mylist -(integer) 1 -redis> object encoding mylist -"ziplist" -redis> object idletime mylist -(integer) 10 -``` - -In the following example you can see how the encoding changes once Redis is no -longer able to use the space saving encoding. - -``` -redis> set foo 1000 -OK -redis> object encoding foo -"int" -redis> append foo bar -(integer) 7 -redis> get foo -"1000bar" -redis> object encoding foo -"raw" -``` +To see the list of available commands you can call `OBJECT HELP`. diff --git a/iredis/data/commands/persist.md b/iredis/data/commands/persist.md index 4819230..67a0014 100644 --- a/iredis/data/commands/persist.md +++ b/iredis/data/commands/persist.md @@ -6,8 +6,8 @@ is associated). @integer-reply, specifically: -- `1` if the timeout was removed. -- `0` if `key` does not exist or does not have an associated timeout. +* `1` if the timeout was removed. 
+* `0` if `key` does not exist or does not have an associated timeout. @examples diff --git a/iredis/data/commands/pexpire.md b/iredis/data/commands/pexpire.md index ae5f775..bc2e6f1 100644 --- a/iredis/data/commands/pexpire.md +++ b/iredis/data/commands/pexpire.md @@ -1,12 +1,24 @@ This command works exactly like `EXPIRE` but the time to live of the key is specified in milliseconds instead of seconds. +## Options + +The `PEXPIRE` command supports a set of options since Redis 7.0: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + @return @integer-reply, specifically: -- `1` if the timeout was set. -- `0` if `key` does not exist. +* `1` if the timeout was set. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples @@ -15,4 +27,8 @@ SET mykey "Hello" PEXPIRE mykey 1500 TTL mykey PTTL mykey +PEXPIRE mykey 1000 XX +TTL mykey +PEXPIRE mykey 1000 NX +TTL mykey ``` diff --git a/iredis/data/commands/pexpireat.md b/iredis/data/commands/pexpireat.md index 4b3ebb7..21e2853 100644 --- a/iredis/data/commands/pexpireat.md +++ b/iredis/data/commands/pexpireat.md @@ -1,12 +1,24 @@ `PEXPIREAT` has the same effect and semantic as `EXPIREAT`, but the Unix time at which the key will expire is specified in milliseconds instead of seconds. 
+## Options + +The `PEXPIREAT` command supports a set of options since Redis 7.0: + +* `NX` -- Set expiry only when the key has no expiry +* `XX` -- Set expiry only when the key has an existing expiry +* `GT` -- Set expiry only when the new expiry is greater than current one +* `LT` -- Set expiry only when the new expiry is less than current one + +A non-volatile key is treated as an infinite TTL for the purpose of `GT` and `LT`. +The `GT`, `LT` and `NX` options are mutually exclusive. + @return @integer-reply, specifically: -- `1` if the timeout was set. -- `0` if `key` does not exist. +* `1` if the timeout was set. +* `0` if the timeout was not set. e.g. key doesn't exist, or operation skipped due to the provided arguments. @examples diff --git a/iredis/data/commands/pexpiretime.md b/iredis/data/commands/pexpiretime.md index 3455ef3..9fcda95 100644 --- a/iredis/data/commands/pexpiretime.md +++ b/iredis/data/commands/pexpiretime.md @@ -1,14 +1,11 @@ -`PEXPIRETIME` has the same semantic as `EXPIRETIME`, but returns the absolute -Unix expiration timestamp in milliseconds instead of seconds. +`PEXPIRETIME` has the same semantic as `EXPIRETIME`, but returns the absolute Unix expiration timestamp in milliseconds instead of seconds. @return -@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value -in order to signal an error (see the description below). +@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value in order to signal an error (see the description below). -- The command returns `-1` if the key exists but has no associated expiration - time. -- The command returns `-2` if the key does not exist. +* The command returns `-1` if the key exists but has no associated expiration time. +* The command returns `-2` if the key does not exist. 
@examples diff --git a/iredis/data/commands/pfadd.md b/iredis/data/commands/pfadd.md index e8e3f03..5d0128b 100644 --- a/iredis/data/commands/pfadd.md +++ b/iredis/data/commands/pfadd.md @@ -1,29 +1,18 @@ -Adds all the element arguments to the HyperLogLog data structure stored at the -variable name specified as first argument. +Adds all the element arguments to the HyperLogLog data structure stored at the variable name specified as first argument. -As a side effect of this command the HyperLogLog internals may be updated to -reflect a different estimation of the number of unique items added so far (the -cardinality of the set). +As a side effect of this command the HyperLogLog internals may be updated to reflect a different estimation of the number of unique items added so far (the cardinality of the set). -If the approximated cardinality estimated by the HyperLogLog changed after -executing the command, `PFADD` returns 1, otherwise 0 is returned. The command -automatically creates an empty HyperLogLog structure (that is, a Redis String of -a specified length and with a given encoding) if the specified key does not -exist. +If the approximated cardinality estimated by the HyperLogLog changed after executing the command, `PFADD` returns 1, otherwise 0 is returned. The command automatically creates an empty HyperLogLog structure (that is, a Redis String of a specified length and with a given encoding) if the specified key does not exist. -To call the command without elements but just the variable name is valid, this -will result into no operation performed if the variable already exists, or just -the creation of the data structure if the key does not exist (in the latter case -1 is returned). +To call the command without elements but just the variable name is valid, this will result into no operation performed if the variable already exists, or just the creation of the data structure if the key does not exist (in the latter case 1 is returned). 
-For an introduction to HyperLogLog data structure check the `PFCOUNT` command -page. +For an introduction to HyperLogLog data structure check the `PFCOUNT` command page. @return @integer-reply, specifically: -- 1 if at least 1 HyperLogLog internal register was altered. 0 otherwise. +* 1 if at least 1 HyperLogLog internal register was altered. 0 otherwise. @examples diff --git a/iredis/data/commands/pfcount.md b/iredis/data/commands/pfcount.md index e39b19e..71d1093 100644 --- a/iredis/data/commands/pfcount.md +++ b/iredis/data/commands/pfcount.md @@ -1,32 +1,21 @@ -When called with a single key, returns the approximated cardinality computed by -the HyperLogLog data structure stored at the specified variable, which is 0 if -the variable does not exist. +When called with a single key, returns the approximated cardinality computed by the HyperLogLog data structure stored at the specified variable, which is 0 if the variable does not exist. -When called with multiple keys, returns the approximated cardinality of the -union of the HyperLogLogs passed, by internally merging the HyperLogLogs stored -at the provided keys into a temporary HyperLogLog. +When called with multiple keys, returns the approximated cardinality of the union of the HyperLogLogs passed, by internally merging the HyperLogLogs stored at the provided keys into a temporary HyperLogLog. -The HyperLogLog data structure can be used in order to count **unique** elements -in a set using just a small constant amount of memory, specifically 12k bytes -for every HyperLogLog (plus a few bytes for the key itself). +The HyperLogLog data structure can be used in order to count **unique** elements in a set using just a small constant amount of memory, specifically 12k bytes for every HyperLogLog (plus a few bytes for the key itself). -The returned cardinality of the observed set is not exact, but approximated with -a standard error of 0.81%. 
+The returned cardinality of the observed set is not exact, but approximated with a standard error of 0.81%. -For example in order to take the count of all the unique search queries -performed in a day, a program needs to call `PFADD` every time a query is -processed. The estimated number of unique queries can be retrieved with -`PFCOUNT` at any time. +For example in order to take the count of all the unique search queries performed in a day, a program needs to call `PFADD` every time a query is processed. The estimated number of unique queries can be retrieved with `PFCOUNT` at any time. -Note: as a side effect of calling this function, it is possible that the -HyperLogLog is modified, since the last 8 bytes encode the latest computed -cardinality for caching purposes. So `PFCOUNT` is technically a write command. +Note: as a side effect of calling this function, it is possible that the HyperLogLog is modified, since the last 8 bytes encode the latest computed cardinality +for caching purposes. So `PFCOUNT` is technically a write command. @return @integer-reply, specifically: -- The approximated number of unique elements observed via `PFADD`. +* The approximated number of unique elements observed via `PFADD`. @examples @@ -39,7 +28,8 @@ PFADD some-other-hll 1 2 3 PFCOUNT hll some-other-hll ``` -## Performances +Performances +--- When `PFCOUNT` is called with a single key, performances are excellent even if in theory constant times to process a dense HyperLogLog are high. This is @@ -55,39 +45,17 @@ the order of magnitude of the millisecond, and should be not abused. The user should take in mind that single-key and multiple-keys executions of this command are semantically different and have different performances. 
-## HyperLogLog representation - -Redis HyperLogLogs are represented using a double representation: the _sparse_ -representation suitable for HLLs counting a small number of elements (resulting -in a small number of registers set to non-zero value), and a _dense_ -representation suitable for higher cardinalities. Redis automatically switches -from the sparse to the dense representation when needed. - -The sparse representation uses a run-length encoding optimized to store -efficiently a big number of registers set to zero. The dense representation is a -Redis string of 12288 bytes in order to store 16384 6-bit counters. The need for -the double representation comes from the fact that using 12k (which is the dense -representation memory requirement) to encode just a few registers for smaller -cardinalities is extremely suboptimal. - -Both representations are prefixed with a 16 bytes header, that includes a magic, -an encoding / version field, and the cached cardinality estimation computed, -stored in little endian format (the most significant bit is 1 if the estimation -is invalid since the HyperLogLog was updated since the cardinality was -computed). - -The HyperLogLog, being a Redis string, can be retrieved with `GET` and restored -with `SET`. Calling `PFADD`, `PFCOUNT` or `PFMERGE` commands with a corrupted -HyperLogLog is never a problem, it may return random values but does not affect -the stability of the server. Most of the times when corrupting a sparse -representation, the server recognizes the corruption and returns an error. - -The representation is neutral from the point of view of the processor word size -and endianness, so the same representation is used by 32 bit and 64 bit -processor, big endian or little endian. - -More details about the Redis HyperLogLog implementation can be found in -[this blog post](http://antirez.com/news/75). 
The source code of the -implementation in the `hyperloglog.c` file is also easy to read and understand, -and includes a full specification for the exact encoding used for the sparse and -dense representations. +HyperLogLog representation +--- + +Redis HyperLogLogs are represented using a double representation: the *sparse* representation suitable for HLLs counting a small number of elements (resulting in a small number of registers set to non-zero value), and a *dense* representation suitable for higher cardinalities. Redis automatically switches from the sparse to the dense representation when needed. + +The sparse representation uses a run-length encoding optimized to store efficiently a big number of registers set to zero. The dense representation is a Redis string of 12288 bytes in order to store 16384 6-bit counters. The need for the double representation comes from the fact that using 12k (which is the dense representation memory requirement) to encode just a few registers for smaller cardinalities is extremely suboptimal. + +Both representations are prefixed with a 16 bytes header, that includes a magic, an encoding / version field, and the cached cardinality estimation computed, stored in little endian format (the most significant bit is 1 if the estimation is invalid since the HyperLogLog was updated since the cardinality was computed). + +The HyperLogLog, being a Redis string, can be retrieved with `GET` and restored with `SET`. Calling `PFADD`, `PFCOUNT` or `PFMERGE` commands with a corrupted HyperLogLog is never a problem, it may return random values but does not affect the stability of the server. Most of the times when corrupting a sparse representation, the server recognizes the corruption and returns an error. + +The representation is neutral from the point of view of the processor word size and endianness, so the same representation is used by 32 bit and 64 bit processor, big endian or little endian. 
+ +More details about the Redis HyperLogLog implementation can be found in [this blog post](http://antirez.com/news/75). The source code of the implementation in the `hyperloglog.c` file is also easy to read and understand, and includes a full specification for the exact encoding used for the sparse and dense representations. diff --git a/iredis/data/commands/pfdebug.md b/iredis/data/commands/pfdebug.md new file mode 100644 index 0000000..b7cceea --- /dev/null +++ b/iredis/data/commands/pfdebug.md @@ -0,0 +1,2 @@ +The `PFDEBUG` command is an internal command. +It is meant to be used for developing and testing Redis.
\ No newline at end of file diff --git a/iredis/data/commands/pfmerge.md b/iredis/data/commands/pfmerge.md index 90e1dc9..c59c930 100644 --- a/iredis/data/commands/pfmerge.md +++ b/iredis/data/commands/pfmerge.md @@ -1,12 +1,13 @@ -Merge multiple HyperLogLog values into an unique value that will approximate the -cardinality of the union of the observed Sets of the source HyperLogLog +Merge multiple HyperLogLog values into a unique value that will approximate +the cardinality of the union of the observed Sets of the source HyperLogLog structures. The computed merged HyperLogLog is set to the destination variable, which is created if does not exist (defaulting to an empty HyperLogLog). -If the destination variable exists, it is treated as one of the source sets and -its cardinality will be included in the cardinality of the computed HyperLogLog. +If the destination variable exists, it is treated as one of the source sets +and its cardinality will be included in the cardinality of the computed +HyperLogLog. @return diff --git a/iredis/data/commands/pfselftest.md b/iredis/data/commands/pfselftest.md new file mode 100644 index 0000000..bdc1e61 --- /dev/null +++ b/iredis/data/commands/pfselftest.md @@ -0,0 +1,2 @@ +The `PFSELFTEST` command is an internal command. +It is meant to be used for developing and testing Redis.
\ No newline at end of file diff --git a/iredis/data/commands/ping.md b/iredis/data/commands/ping.md index 251cc07..c16f760 100644 --- a/iredis/data/commands/ping.md +++ b/iredis/data/commands/ping.md @@ -1,15 +1,18 @@ Returns `PONG` if no argument is provided, otherwise return a copy of the -argument as a bulk. This command is often used to test if a connection is still -alive, or to measure latency. +argument as a bulk. +This command is often used to test if a connection is still alive, or to measure +latency. If the client is subscribed to a channel or a pattern, it will instead return a multi-bulk with a "pong" in the first position and an empty bulk in the second -position, unless an argument is provided in which case it returns a copy of the -argument. +position, unless an argument is provided in which case it returns a copy +of the argument. @return -@simple-string-reply +@simple-string-reply, and specifically `PONG`, when no argument is provided. + +@bulk-string-reply the argument provided, when applicable. @examples diff --git a/iredis/data/commands/psubscribe.md b/iredis/data/commands/psubscribe.md index fb14ca1..81c4bba 100644 --- a/iredis/data/commands/psubscribe.md +++ b/iredis/data/commands/psubscribe.md @@ -2,8 +2,8 @@ Subscribes the client to the given patterns. Supported glob-style patterns: -- `h?llo` subscribes to `hello`, `hallo` and `hxllo` -- `h*llo` subscribes to `hllo` and `heeeello` -- `h[ae]llo` subscribes to `hello` and `hallo,` but not `hillo` +* `h?llo` subscribes to `hello`, `hallo` and `hxllo` +* `h*llo` subscribes to `hllo` and `heeeello` +* `h[ae]llo` subscribes to `hello` and `hallo,` but not `hillo` Use `\` to escape special characters if you want to match them verbatim. diff --git a/iredis/data/commands/psync.md b/iredis/data/commands/psync.md index 756a3b6..8cbacf2 100644 --- a/iredis/data/commands/psync.md +++ b/iredis/data/commands/psync.md @@ -3,12 +3,11 @@ Initiates a replication stream from the master. 
The `PSYNC` command is called by Redis replicas for initiating a replication stream from the master. -For more information about replication in Redis please check the [replication -page][tr]. +For more information about replication in Redis please check the +[replication page][tr]. [tr]: /topics/replication @return -**Non standard return value**, a bulk transfer of the data followed by `PING` -and write requests from the master. +**Non standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master. diff --git a/iredis/data/commands/pttl.md b/iredis/data/commands/pttl.md index 49eea99..4e08079 100644 --- a/iredis/data/commands/pttl.md +++ b/iredis/data/commands/pttl.md @@ -2,18 +2,16 @@ Like `TTL` this command returns the remaining time to live of a key that has an expire set, with the sole difference that `TTL` returns the amount of remaining time in seconds while `PTTL` returns it in milliseconds. -In Redis 2.6 or older the command returns `-1` if the key does not exist or if -the key exist but has no associated expire. +In Redis 2.6 or older the command returns `-1` if the key does not exist or if the key exist but has no associated expire. Starting with Redis 2.8 the return value in case of error changed: -- The command returns `-2` if the key does not exist. -- The command returns `-1` if the key exists but has no associated expire. +* The command returns `-2` if the key does not exist. +* The command returns `-1` if the key exists but has no associated expire. @return -@integer-reply: TTL in milliseconds, or a negative value in order to signal an -error (see the description above). +@integer-reply: TTL in milliseconds, or a negative value in order to signal an error (see the description above). 
@examples diff --git a/iredis/data/commands/pubsub-channels.md b/iredis/data/commands/pubsub-channels.md new file mode 100644 index 0000000..8b9a06e --- /dev/null +++ b/iredis/data/commands/pubsub-channels.md @@ -0,0 +1,11 @@ +Lists the currently *active channels*. + +An active channel is a Pub/Sub channel with one or more subscribers (excluding clients subscribed to patterns). + +If no `pattern` is specified, all the channels are listed, otherwise if pattern is specified only channels matching the specified glob-style pattern are listed. + +Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. + +@return + +@array-reply: a list of active channels, optionally matching the specified pattern. diff --git a/iredis/data/commands/pubsub-help.md b/iredis/data/commands/pubsub-help.md new file mode 100644 index 0000000..a7ab2a3 --- /dev/null +++ b/iredis/data/commands/pubsub-help.md @@ -0,0 +1,5 @@ +The `PUBSUB HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/pubsub-numpat.md b/iredis/data/commands/pubsub-numpat.md new file mode 100644 index 0000000..6f3a7c9 --- /dev/null +++ b/iredis/data/commands/pubsub-numpat.md @@ -0,0 +1,9 @@ +Returns the number of unique patterns that are subscribed to by clients (that are performed using the `PSUBSCRIBE` command). + +Note that this isn't the count of clients subscribed to patterns, but the total number of unique patterns all the clients are subscribed to. + +Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. 
That said, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. + +@return + +@integer-reply: the number of patterns all the clients are subscribed to. diff --git a/iredis/data/commands/pubsub-numsub.md b/iredis/data/commands/pubsub-numsub.md new file mode 100644 index 0000000..d4d6b85 --- /dev/null +++ b/iredis/data/commands/pubsub-numsub.md @@ -0,0 +1,11 @@ +Returns the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels. + +Note that it is valid to call this command without channels. In this case it will just return an empty list. + +Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. + +@return + +@array-reply: a list of channels and number of subscribers for every channel. + +The format is channel, count, channel, count, ..., so the list is flat. The order in which the channels are listed is the same as the order of the channels specified in the command call. diff --git a/iredis/data/commands/pubsub-shardchannels.md b/iredis/data/commands/pubsub-shardchannels.md new file mode 100644 index 0000000..543eab0 --- /dev/null +++ b/iredis/data/commands/pubsub-shardchannels.md @@ -0,0 +1,20 @@ +Lists the currently *active shard channels*. + +An active shard channel is a Pub/Sub shard channel with one or more subscribers. + +If no `pattern` is specified, all the channels are listed, otherwise if pattern is specified only channels matching the specified glob-style pattern are listed. + +The information returned about the active shard channels are at the shard level and not at the cluster level. + +@return + +@array-reply: a list of active channels, optionally matching the specified pattern. 
+ +@examples + +``` +> PUBSUB SHARDCHANNELS +1) "orders" +> PUBSUB SHARDCHANNELS o* +1) "orders" +``` diff --git a/iredis/data/commands/pubsub-shardnumsub.md b/iredis/data/commands/pubsub-shardnumsub.md new file mode 100644 index 0000000..8d09d43 --- /dev/null +++ b/iredis/data/commands/pubsub-shardnumsub.md @@ -0,0 +1,19 @@ +Returns the number of subscribers for the specified shard channels. + +Note that it is valid to call this command without channels, in this case it will just return an empty list. + +Cluster note: in a Redis Cluster, `PUBSUB`'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. + +@return + +@array-reply: a list of channels and number of subscribers for every channel. + +The format is channel, count, channel, count, ..., so the list is flat. The order in which the channels are listed is the same as the order of the shard channels specified in the command call. + +@examples + +``` +> PUBSUB SHARDNUMSUB orders +1) "orders" +2) (integer) 1 +``` diff --git a/iredis/data/commands/pubsub.md b/iredis/data/commands/pubsub.md index f5a0a7c..fa10a9e 100644 --- a/iredis/data/commands/pubsub.md +++ b/iredis/data/commands/pubsub.md @@ -1,50 +1,3 @@ -The PUBSUB command is an introspection command that allows to inspect the state -of the Pub/Sub subsystem. It is composed of subcommands that are documented -separately. The general form is: +This is a container command for Pub/Sub introspection commands. - PUBSUB <subcommand> ... args ... - -Cluster note: in a Redis Cluster clients can subscribe to every node, and can -also publish to every other node. The cluster will make sure that published -messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster -only report information from the node's Pub/Sub context, rather than the entire -cluster. - -# PUBSUB CHANNELS [pattern] - -Lists the currently _active channels_. 
An active channel is a Pub/Sub channel -with one or more subscribers (not including clients subscribed to patterns). - -If no `pattern` is specified, all the channels are listed, otherwise if pattern -is specified only channels matching the specified glob-style pattern are listed. - -@return - -@array-reply: a list of active channels, optionally matching the specified -pattern. - -# `PUBSUB NUMSUB [channel-1 ... channel-N]` - -Returns the number of subscribers (not counting clients subscribed to patterns) -for the specified channels. - -@return - -@array-reply: a list of channels and number of subscribers for every channel. -The format is channel, count, channel, count, ..., so the list is flat. The -order in which the channels are listed is the same as the order of the channels -specified in the command call. - -Note that it is valid to call this command without channels. In this case it -will just return an empty list. - -# `PUBSUB NUMPAT` - -Returns the number of subscriptions to patterns (that are performed using the -`PSUBSCRIBE` command). Note that this is not just the count of clients -subscribed to patterns but the total number of patterns all the clients are -subscribed to. - -@return - -@integer-reply: the number of patterns all the clients are subscribed to. +To see the list of available commands you can call `PUBSUB HELP`. diff --git a/iredis/data/commands/punsubscribe.md b/iredis/data/commands/punsubscribe.md index 03ed279..af8ee7e 100644 --- a/iredis/data/commands/punsubscribe.md +++ b/iredis/data/commands/punsubscribe.md @@ -2,5 +2,6 @@ Unsubscribes the client from the given patterns, or from all of them if none is given. When no patterns are specified, the client is unsubscribed from all the -previously subscribed patterns. In this case, a message for every unsubscribed -pattern will be sent to the client. +previously subscribed patterns. +In this case, a message for every unsubscribed pattern will be sent to the +client. 
diff --git a/iredis/data/commands/quit.md b/iredis/data/commands/quit.md index b6ce3bf..6be9b55 100644 --- a/iredis/data/commands/quit.md +++ b/iredis/data/commands/quit.md @@ -1,5 +1,6 @@ -Ask the server to close the connection. The connection is closed as soon as all -pending replies have been written to the client. +Ask the server to close the connection. +The connection is closed as soon as all pending replies have been written to the +client. @return diff --git a/iredis/data/commands/readonly.md b/iredis/data/commands/readonly.md index 00e8aad..bc73b9b 100644 --- a/iredis/data/commands/readonly.md +++ b/iredis/data/commands/readonly.md @@ -1,20 +1,18 @@ -Enables read queries for a connection to a Redis Cluster replica node. +Enables read queries for a connection to a Redis Cluster replica node. -Normally replica nodes will redirect clients to the authoritative master for the -hash slot involved in a given command, however clients can use replicas in order -to scale reads using the `READONLY` command. +Normally replica nodes will redirect clients to the authoritative master for +the hash slot involved in a given command, however clients can use replicas +in order to scale reads using the `READONLY` command. -`READONLY` tells a Redis Cluster replica node that the client is willing to read -possibly stale data and is not interested in running write queries. +`READONLY` tells a Redis Cluster replica node that the client is willing to +read possibly stale data and is not interested in running write queries. -When the connection is in readonly mode, the cluster will send a redirection to -the client only if the operation involves keys not served by the replica's +When the connection is in readonly mode, the cluster will send a redirection +to the client only if the operation involves keys not served by the replica's master node. This may happen because: -1. The client sent a command about hash slots never served by the master of this - replica. -2. 
The cluster was reconfigured (for example resharded) and the replica is no - longer able to serve commands for a given hash slot. +1. The client sent a command about hash slots never served by the master of this replica. +2. The cluster was reconfigured (for example resharded) and the replica is no longer able to serve commands for a given hash slot. @return diff --git a/iredis/data/commands/readwrite.md b/iredis/data/commands/readwrite.md index f31a1f9..d6d7089 100644 --- a/iredis/data/commands/readwrite.md +++ b/iredis/data/commands/readwrite.md @@ -1,9 +1,9 @@ -Disables read queries for a connection to a Redis Cluster slave node. +Disables read queries for a connection to a Redis Cluster replica node. -Read queries against a Redis Cluster slave node are disabled by default, but you -can use the `READONLY` command to change this behavior on a per- connection -basis. The `READWRITE` command resets the readonly mode flag of a connection -back to readwrite. +Read queries against a Redis Cluster replica node are disabled by default, +but you can use the `READONLY` command to change this behavior on a per- +connection basis. The `READWRITE` command resets the readonly mode flag +of a connection back to readwrite. @return diff --git a/iredis/data/commands/rename.md b/iredis/data/commands/rename.md index 5a38f63..471ecf4 100644 --- a/iredis/data/commands/rename.md +++ b/iredis/data/commands/rename.md @@ -1,17 +1,8 @@ -Renames `key` to `newkey`. It returns an error when `key` does not exist. If -`newkey` already exists it is overwritten, when this happens `RENAME` executes -an implicit `DEL` operation, so if the deleted key contains a very big value it -may cause high latency even if `RENAME` itself is usually a constant-time -operation. +Renames `key` to `newkey`. +It returns an error when `key` does not exist. 
+If `newkey` already exists it is overwritten, when this happens `RENAME` executes an implicit `DEL` operation, so if the deleted key contains a very big value it may cause high latency even if `RENAME` itself is usually a constant-time operation. -In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, -meaning that in practice only keys that have the same hash tag can be reliably -renamed in cluster. - -@history - -- `<= 3.2.0`: Before Redis 3.2.0, an error is returned if source and destination - names are the same. +In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, meaning that in practice only keys that have the same hash tag can be reliably renamed in cluster. @return @@ -24,3 +15,7 @@ SET mykey "Hello" RENAME mykey myotherkey GET myotherkey ``` + +## Behavior change history + +* `>= 3.2.0`: The command no longer returns an error when source and destination names are the same.
\ No newline at end of file diff --git a/iredis/data/commands/renamenx.md b/iredis/data/commands/renamenx.md index 8f98ec5..c132af4 100644 --- a/iredis/data/commands/renamenx.md +++ b/iredis/data/commands/renamenx.md @@ -1,21 +1,14 @@ -Renames `key` to `newkey` if `newkey` does not yet exist. It returns an error -when `key` does not exist. +Renames `key` to `newkey` if `newkey` does not yet exist. +It returns an error when `key` does not exist. -In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, -meaning that in practice only keys that have the same hash tag can be reliably -renamed in cluster. - -@history - -- `<= 3.2.0`: Before Redis 3.2.0, an error is returned if source and destination - names are the same. +In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, meaning that in practice only keys that have the same hash tag can be reliably renamed in cluster. @return @integer-reply, specifically: -- `1` if `key` was renamed to `newkey`. -- `0` if `newkey` already exists. +* `1` if `key` was renamed to `newkey`. +* `0` if `newkey` already exists. @examples diff --git a/iredis/data/commands/replconf.md b/iredis/data/commands/replconf.md new file mode 100644 index 0000000..fc34549 --- /dev/null +++ b/iredis/data/commands/replconf.md @@ -0,0 +1,2 @@ +The `REPLCONF` command is an internal command. +It is used by a Redis master to configure a connected replica.
\ No newline at end of file diff --git a/iredis/data/commands/replicaof.md b/iredis/data/commands/replicaof.md index c8c839c..1c3ec93 100644 --- a/iredis/data/commands/replicaof.md +++ b/iredis/data/commands/replicaof.md @@ -1,21 +1,21 @@ -The `REPLICAOF` command can change the replication settings of a replica on the -fly. +The `REPLICAOF` command can change the replication settings of a replica on the fly. -If a Redis server is already acting as replica, the command `REPLICAOF` NO ONE -will turn off the replication, turning the Redis server into a MASTER. In the -proper form `REPLICAOF` hostname port will make the server a replica of another -server listening at the specified hostname and port. +If a Redis server is already acting as replica, the command `REPLICAOF` NO ONE will turn off the replication, turning the Redis server into a MASTER. In the proper form `REPLICAOF` hostname port will make the server a replica of another server listening at the specified hostname and port. -If a server is already a replica of some master, `REPLICAOF` hostname port will -stop the replication against the old server and start the synchronization -against the new one, discarding the old dataset. +If a server is already a replica of some master, `REPLICAOF` hostname port will stop the replication against the old server and start the synchronization against the new one, discarding the old dataset. -The form `REPLICAOF` NO ONE will stop replication, turning the server into a -MASTER, but will not discard the replication. So, if the old master stops -working, it is possible to turn the replica into a master and set the -application to use this new master in read/write. Later when the other Redis -server is fixed, it can be reconfigured to work as a replica. +The form `REPLICAOF` NO ONE will stop replication, turning the server into a MASTER, but will not discard the replication. 
So, if the old master stops working, it is possible to turn the replica into a master and set the application to use this new master in read/write. Later when the other Redis server is fixed, it can be reconfigured to work as a replica. @return @simple-string-reply + +@examples + +``` +> REPLICAOF NO ONE +"OK" + +> REPLICAOF 127.0.0.1 6799 +"OK" +``` diff --git a/iredis/data/commands/reset.md b/iredis/data/commands/reset.md index d381198..b3f17d2 100644 --- a/iredis/data/commands/reset.md +++ b/iredis/data/commands/reset.md @@ -1,21 +1,21 @@ -This command performs a full reset of the connection's server-side context, +This command performs a full reset of the connection's server-side context, mimicking the effect of disconnecting and reconnecting again. When the command is called from a regular client connection, it does the following: -- Discards the current `MULTI` transaction block, if one exists. -- Unwatches all keys `WATCH`ed by the connection. -- Disables `CLIENT TRACKING`, if in use. -- Sets the connection to `READWRITE` mode. -- Cancels the connection's `ASKING` mode, if previously set. -- Sets `CLIENT REPLY` to `ON`. -- Sets the protocol version to RESP2. -- `SELECT`s database 0. -- Exits `MONITOR` mode, when applicable. -- Aborts Pub/Sub's subscription state (`SUBSCRIBE` and `PSUBSCRIBE`), when +* Discards the current `MULTI` transaction block, if one exists. +* Unwatches all keys `WATCH`ed by the connection. +* Disables `CLIENT TRACKING`, if in use. +* Sets the connection to `READWRITE` mode. +* Cancels the connection's `ASKING` mode, if previously set. +* Sets `CLIENT REPLY` to `ON`. +* Sets the protocol version to RESP2. +* `SELECT`s database 0. +* Exits `MONITOR` mode, when applicable. +* Aborts Pub/Sub's subscription state (`SUBSCRIBE` and `PSUBSCRIBE`), when appropriate. 
-- Deauthenticates the connection, requiring a call `AUTH` to reauthenticate when +* Deauthenticates the connection, requiring a call `AUTH` to reauthenticate when authentication is enabled. @return diff --git a/iredis/data/commands/restore-asking.md b/iredis/data/commands/restore-asking.md new file mode 100644 index 0000000..1648805 --- /dev/null +++ b/iredis/data/commands/restore-asking.md @@ -0,0 +1,2 @@ +The `RESTORE-ASKING` command is an internal command. +It is used by a Redis cluster master during slot migration.
\ No newline at end of file diff --git a/iredis/data/commands/restore.md b/iredis/data/commands/restore.md index 50632e4..eb605be 100644 --- a/iredis/data/commands/restore.md +++ b/iredis/data/commands/restore.md @@ -4,20 +4,19 @@ provided serialized value (obtained via `DUMP`). If `ttl` is 0 the key is created without any expire, otherwise the specified expire time (in milliseconds) is set. -If the `ABSTTL` modifier was used, `ttl` should represent an absolute [Unix -timestamp][hewowu] (in milliseconds) in which the key will expire. (Redis 5.0 or -greater). +If the `ABSTTL` modifier was used, `ttl` should represent an absolute +[Unix timestamp][hewowu] (in milliseconds) in which the key will expire. [hewowu]: http://en.wikipedia.org/wiki/Unix_time For eviction purposes, you may use the `IDLETIME` or `FREQ` modifiers. See -`OBJECT` for more information (Redis 5.0 or greater). +`OBJECT` for more information. -`RESTORE` will return a "Target key name is busy" error when `key` already -exists unless you use the `REPLACE` modifier (Redis 3.0 or greater). +`!RESTORE` will return a "Target key name is busy" error when `key` already +exists unless you use the `REPLACE` modifier. -`RESTORE` checks the RDB version and data checksum. If they don't match an error -is returned. +`!RESTORE` checks the RDB version and data checksum. +If they don't match an error is returned. @return diff --git a/iredis/data/commands/role.md b/iredis/data/commands/role.md index c42fa74..c308c93 100644 --- a/iredis/data/commands/role.md +++ b/iredis/data/commands/role.md @@ -1,17 +1,13 @@ -Provide information on the role of a Redis instance in the context of -replication, by returning if the instance is currently a `master`, `slave`, or -`sentinel`. The command also returns additional information about the state of -the replication (if the role is master or slave) or the list of monitored master -names (if the role is sentinel). 
+Provide information on the role of a Redis instance in the context of replication, by returning if the instance is currently a `master`, `slave`, or `sentinel`. The command also returns additional information about the state of the replication (if the role is master or slave) or the list of monitored master names (if the role is sentinel). ## Output format -The command returns an array of elements. The first element is the role of the -instance, as one of the following three strings: +The command returns an array of elements. The first element is the role of +the instance, as one of the following three strings: -- "master" -- "slave" -- "sentinel" +* "master" +* "slave" +* "sentinel" The additional elements of the array depends on the role. @@ -33,12 +29,8 @@ An example of output when `ROLE` is called in a master instance: The master output is composed of the following parts: 1. The string `master`. -2. The current master replication offset, which is an offset that masters and - replicas share to understand, in partial resynchronizations, the part of the - replication stream the replicas needs to fetch to continue. -3. An array composed of three elements array representing the connected - replicas. Every sub-array contains the replica IP, port, and the last - acknowledged replication offset. +2. The current master replication offset, which is an offset that masters and replicas share to understand, in partial resynchronizations, the part of the replication stream the replicas needs to fetch to continue. +3. An array composed of three elements array representing the connected replicas. Every sub-array contains the replica IP, port, and the last acknowledged replication offset. ## Output of the command on replicas @@ -54,16 +46,11 @@ An example of output when `ROLE` is called in a replica instance: The replica output is composed of the following parts: -1. The string `slave`, because of backward compatibility (see note at the end of - this page). +1. 
The string `slave`, because of backward compatibility (see note at the end of this page). 2. The IP of the master. 3. The port number of the master. -4. The state of the replication from the point of view of the master, that can - be `connect` (the instance needs to connect to its master), `connecting` (the - master-replica connection is in progress), `sync` (the master and replica are - trying to perform the synchronization), `connected` (the replica is online). -5. The amount of data received from the replica so far in terms of master - replication offset. +4. The state of the replication from the point of view of the master, that can be `connect` (the instance needs to connect to its master), `connecting` (the master-replica connection is in progress), `sync` (the master and replica are trying to perform the synchronization), `connected` (the replica is online). +5. The amount of data received from the replica so far in terms of master replication offset. ## Sentinel output @@ -84,13 +71,7 @@ The sentinel output is composed of the following parts: @return -@array-reply: where the first element is one of `master`, `slave`, `sentinel` -and the additional elements are role-specific as illustrated above. - -@history - -- This command was introduced in the middle of a Redis stable release, - specifically with Redis 2.8.12. +@array-reply: where the first element is one of `master`, `slave`, `sentinel` and the additional elements are role-specific as illustrated above. @examples @@ -98,8 +79,4 @@ and the additional elements are role-specific as illustrated above. ROLE ``` -**A note about the word slave used in this man page**: Starting with Redis 5, if -not for backward compatibility, the Redis project no longer uses the word slave. -Unfortunately in this command the word slave is part of the protocol, so we'll -be able to remove such occurrences only when this API will be naturally -deprecated. 
+**A note about the word slave used in this man page**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. diff --git a/iredis/data/commands/rpop.md b/iredis/data/commands/rpop.md index 6233c03..99c863c 100644 --- a/iredis/data/commands/rpop.md +++ b/iredis/data/commands/rpop.md @@ -1,24 +1,19 @@ Removes and returns the last elements of the list stored at `key`. -By default, the command pops a single element from the end of the list. When -provided with the optional `count` argument, the reply will consist of up to -`count` elements, depending on the list's length. +By default, the command pops a single element from the end of the list. +When provided with the optional `count` argument, the reply will consist of up +to `count` elements, depending on the list's length. @return When called without the `count` argument: -@bulk-string-reply: the value of the last element, or `nil` when `key` does not -exist. +@bulk-string-reply: the value of the last element, or `nil` when `key` does not exist. When called with the `count` argument: @array-reply: list of popped elements, or `nil` when `key` does not exist. -@history - -- `>= 6.2`: Added the `count` argument. - @examples ```cli diff --git a/iredis/data/commands/rpoplpush.md b/iredis/data/commands/rpoplpush.md index 2333f84..d00e8c9 100644 --- a/iredis/data/commands/rpoplpush.md +++ b/iredis/data/commands/rpoplpush.md @@ -3,16 +3,15 @@ Atomically returns and removes the last element (tail) of the list stored at at `destination`. For example: consider `source` holding the list `a,b,c`, and `destination` -holding the list `x,y,z`. Executing `RPOPLPUSH` results in `source` holding -`a,b` and `destination` holding `c,x,y,z`. +holding the list `x,y,z`. 
+Executing `RPOPLPUSH` results in `source` holding `a,b` and `destination` +holding `c,x,y,z`. If `source` does not exist, the value `nil` is returned and no operation is -performed. If `source` and `destination` are the same, the operation is -equivalent to removing the last element from the list and pushing it as first -element of the list, so it can be considered as a list rotation command. - -As per Redis 6.2.0, RPOPLPUSH is considered deprecated. Please prefer `LMOVE` in -new code. +performed. +If `source` and `destination` are the same, the operation is equivalent to +removing the last element from the list and pushing it as first element of the +list, so it can be considered as a list rotation command. @return @@ -32,22 +31,24 @@ LRANGE myotherlist 0 -1 ## Pattern: Reliable queue Redis is often used as a messaging server to implement processing of background -jobs or other kinds of messaging tasks. A simple form of queue is often obtained -pushing values into a list in the producer side, and waiting for this values in -the consumer side using `RPOP` (using polling), or `BRPOP` if the client is -better served by a blocking operation. +jobs or other kinds of messaging tasks. +A simple form of queue is often obtained pushing values into a list in the +producer side, and waiting for this values in the consumer side using `RPOP` +(using polling), or `BRPOP` if the client is better served by a blocking +operation. -However in this context the obtained queue is not _reliable_ as messages can be -lost, for example in the case there is a network problem or if the consumer -crashes just after the message is received but it is still to process. +However in this context the obtained queue is not _reliable_ as messages can +be lost, for example in the case there is a network problem or if the consumer +crashes just after the message is received but before it can be processed. 
`RPOPLPUSH` (or `BRPOPLPUSH` for the blocking variant) offers a way to avoid this problem: the consumer fetches the message and at the same time pushes it -into a _processing_ list. It will use the `LREM` command in order to remove the -message from the _processing_ list once the message has been processed. +into a _processing_ list. +It will use the `LREM` command in order to remove the message from the +_processing_ list once the message has been processed. An additional client may monitor the _processing_ list for items that remain -there for too much time, and will push those timed out items into the queue +there for too much time, pushing timed out items into the queue again if needed. ## Pattern: Circular list @@ -57,17 +58,17 @@ all the elements of an N-elements list, one after the other, in O(N) without transferring the full list from the server to the client using a single `LRANGE` operation. -The above pattern works even if the following two conditions: +The above pattern works even if one or both of the following conditions occur: -- There are multiple clients rotating the list: they'll fetch different - elements, until all the elements of the list are visited, and the process +* There are multiple clients rotating the list: they'll fetch different + elements, until all the elements of the list are visited, and the process restarts. -- Even if other clients are actively pushing new items at the end of the list. +* Other clients are actively pushing new items at the end of the list. The above makes it very simple to implement a system where a set of items must -be processed by N workers continuously as fast as possible. An example is a -monitoring system that must check that a set of web sites are reachable, with -the smallest delay possible, using a number of parallel workers. +be processed by N workers continuously as fast as possible. 
+An example is a monitoring system that must check that a set of web sites are +reachable, with the smallest delay possible, using a number of parallel workers. Note that this implementation of workers is trivially scalable and reliable, because even if a message is lost the item is still in the queue and will be diff --git a/iredis/data/commands/rpush.md b/iredis/data/commands/rpush.md index 14a796b..def4ee1 100644 --- a/iredis/data/commands/rpush.md +++ b/iredis/data/commands/rpush.md @@ -1,23 +1,19 @@ -Insert all the specified values at the tail of the list stored at `key`. If -`key` does not exist, it is created as empty list before performing the push -operation. When `key` holds a value that is not a list, an error is returned. +Insert all the specified values at the tail of the list stored at `key`. +If `key` does not exist, it is created as empty list before performing the push +operation. +When `key` holds a value that is not a list, an error is returned. It is possible to push multiple elements using a single command call just -specifying multiple arguments at the end of the command. Elements are inserted -one after the other to the tail of the list, from the leftmost element to the -rightmost element. So for instance the command `RPUSH mylist a b c` will result -into a list containing `a` as first element, `b` as second element and `c` as -third element. +specifying multiple arguments at the end of the command. +Elements are inserted one after the other to the tail of the list, from the +leftmost element to the rightmost element. +So for instance the command `RPUSH mylist a b c` will result into a list +containing `a` as first element, `b` as second element and `c` as third element. @return @integer-reply: the length of the list after the push operation. -@history - -- `>= 2.4`: Accepts multiple `element` arguments. In Redis versions older than - 2.4 it was possible to push a single value per command. 
- @examples ```cli diff --git a/iredis/data/commands/rpushx.md b/iredis/data/commands/rpushx.md index 0345367..daab019 100644 --- a/iredis/data/commands/rpushx.md +++ b/iredis/data/commands/rpushx.md @@ -1,16 +1,12 @@ Inserts specified values at the tail of the list stored at `key`, only if `key` -already exists and holds a list. In contrary to `RPUSH`, no operation will be -performed when `key` does not yet exist. +already exists and holds a list. +In contrary to `RPUSH`, no operation will be performed when `key` does not yet +exist. @return @integer-reply: the length of the list after the push operation. -@history - -- `>= 4.0`: Accepts multiple `element` arguments. In Redis versions older than - 4.0 it was possible to push a single value per command. - @examples ```cli diff --git a/iredis/data/commands/sadd.md b/iredis/data/commands/sadd.md index df12d6a..f8232bb 100644 --- a/iredis/data/commands/sadd.md +++ b/iredis/data/commands/sadd.md @@ -1,6 +1,7 @@ -Add the specified members to the set stored at `key`. Specified members that are -already a member of this set are ignored. If `key` does not exist, a new set is -created before adding the specified members. +Add the specified members to the set stored at `key`. +Specified members that are already a member of this set are ignored. +If `key` does not exist, a new set is created before adding the specified +members. An error is returned when the value stored at `key` is not a set. @@ -9,11 +10,6 @@ An error is returned when the value stored at `key` is not a set. @integer-reply: the number of elements that were added to the set, not including all the elements already present in the set. -@history - -- `>= 2.4`: Accepts multiple `member` arguments. Redis versions before 2.4 are - only able to add a single member per call. 
- @examples ```cli diff --git a/iredis/data/commands/save.md b/iredis/data/commands/save.md index 540dc7a..c66c5e9 100644 --- a/iredis/data/commands/save.md +++ b/iredis/data/commands/save.md @@ -3,10 +3,11 @@ _point in time_ snapshot of all the data inside the Redis instance, in the form of an RDB file. You almost never want to call `SAVE` in production environments where it will -block all the other clients. Instead usually `BGSAVE` is used. However in case -of issues preventing Redis to create the background saving child (for instance -errors in the fork(2) system call), the `SAVE` command can be a good last resort -to perform the dump of the latest dataset. +block all the other clients. +Instead usually `BGSAVE` is used. +However in case of issues preventing Redis to create the background saving child +(for instance errors in the fork(2) system call), the `SAVE` command can be a +good last resort to perform the dump of the latest dataset. Please refer to the [persistence documentation][tp] for detailed information. diff --git a/iredis/data/commands/scan.md b/iredis/data/commands/scan.md index 774425a..fd5924f 100644 --- a/iredis/data/commands/scan.md +++ b/iredis/data/commands/scan.md @@ -1,36 +1,21 @@ -The `SCAN` command and the closely related commands `SSCAN`, `HSCAN` and `ZSCAN` -are used in order to incrementally iterate over a collection of elements. - -- `SCAN` iterates the set of keys in the currently selected Redis database. -- `SSCAN` iterates elements of Sets types. -- `HSCAN` iterates fields of Hash types and their associated values. -- `ZSCAN` iterates elements of Sorted Set types and their associated scores. - -Since these commands allow for incremental iteration, returning only a small -number of elements per call, they can be used in production without the downside -of commands like `KEYS` or `SMEMBERS` that may block the server for a long time -(even several seconds) when called against big collections of keys or elements. 
- -However while blocking commands like `SMEMBERS` are able to provide all the -elements that are part of a Set in a given moment, The SCAN family of commands -only offer limited guarantees about the returned elements since the collection -that we incrementally iterate can change during the iteration process. - -Note that `SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` all work very similarly, so this -documentation covers all the four commands. However an obvious difference is -that in the case of `SSCAN`, `HSCAN` and `ZSCAN` the first argument is the name -of the key holding the Set, Hash or Sorted Set value. The `SCAN` command does -not need any key name argument as it iterates keys in the current database, so -the iterated object is the database itself. +The `SCAN` command and the closely related commands `SSCAN`, `HSCAN` and `ZSCAN` are used in order to incrementally iterate over a collection of elements. + +* `SCAN` iterates the set of keys in the currently selected Redis database. +* `SSCAN` iterates elements of Sets types. +* `HSCAN` iterates fields of Hash types and their associated values. +* `ZSCAN` iterates elements of Sorted Set types and their associated scores. + +Since these commands allow for incremental iteration, returning only a small number of elements per call, they can be used in production without the downside of commands like `KEYS` or `SMEMBERS` that may block the server for a long time (even several seconds) when called against big collections of keys or elements. + +However while blocking commands like `SMEMBERS` are able to provide all the elements that are part of a Set in a given moment, The SCAN family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. + +Note that `SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` all work very similarly, so this documentation covers all the four commands. 
However an obvious difference is that in the case of `SSCAN`, `HSCAN` and `ZSCAN` the first argument is the name of the key holding the Set, Hash or Sorted Set value. The `SCAN` command does not need any key name argument as it iterates keys in the current database, so the iterated object is the database itself. ## SCAN basic usage -SCAN is a cursor based iterator. This means that at every call of the command, -the server returns an updated cursor that the user needs to use as the cursor -argument in the next call. +SCAN is a cursor based iterator. This means that at every call of the command, the server returns an updated cursor that the user needs to use as the cursor argument in the next call. -An iteration starts when the cursor is set to 0, and terminates when the cursor -returned by the server is 0. The following is an example of SCAN iteration: +An iteration starts when the cursor is set to 0, and terminates when the cursor returned by the server is 0. The following is an example of SCAN iteration: ``` redis 127.0.0.1:6379> scan 0 @@ -59,96 +44,47 @@ redis 127.0.0.1:6379> scan 17 9) "key:11" ``` -In the example above, the first call uses zero as a cursor, to start the -iteration. The second call uses the cursor returned by the previous call as the -first element of the reply, that is, 17. +In the example above, the first call uses zero as a cursor, to start the iteration. The second call uses the cursor returned by the previous call as the first element of the reply, that is, 17. -As you can see the **SCAN return value** is an array of two values: the first -value is the new cursor to use in the next call, the second value is an array of -elements. +As you can see the **SCAN return value** is an array of two values: the first value is the new cursor to use in the next call, the second value is an array of elements. 
-Since in the second call the returned cursor is 0, the server signaled to the -caller that the iteration finished, and the collection was completely explored. -Starting an iteration with a cursor value of 0, and calling `SCAN` until the -returned cursor is 0 again is called a **full iteration**. +Since in the second call the returned cursor is 0, the server signaled to the caller that the iteration finished, and the collection was completely explored. Starting an iteration with a cursor value of 0, and calling `SCAN` until the returned cursor is 0 again is called a **full iteration**. ## Scan guarantees -The `SCAN` command, and the other commands in the `SCAN` family, are able to -provide to the user a set of guarantees associated to full iterations. - -- A full iteration always retrieves all the elements that were present in the - collection from the start to the end of a full iteration. This means that if a - given element is inside the collection when an iteration is started, and is - still there when an iteration terminates, then at some point `SCAN` returned - it to the user. -- A full iteration never returns any element that was NOT present in the - collection from the start to the end of a full iteration. So if an element was - removed before the start of an iteration, and is never added back to the - collection for all the time an iteration lasts, `SCAN` ensures that this - element will never be returned. - -However because `SCAN` has very little state associated (just the cursor) it has -the following drawbacks: - -- A given element may be returned multiple times. It is up to the application to - handle the case of duplicated elements, for example only using the returned - elements in order to perform operations that are safe when re-applied multiple - times. -- Elements that were not constantly present in the collection during a full - iteration, may be returned or not: it is undefined. 
+The `SCAN` command, and the other commands in the `SCAN` family, are able to provide to the user a set of guarantees associated to full iterations. + +* A full iteration always retrieves all the elements that were present in the collection from the start to the end of a full iteration. This means that if a given element is inside the collection when an iteration is started, and is still there when an iteration terminates, then at some point `SCAN` returned it to the user. +* A full iteration never returns any element that was NOT present in the collection from the start to the end of a full iteration. So if an element was removed before the start of an iteration, and is never added back to the collection for all the time an iteration lasts, `SCAN` ensures that this element will never be returned. + +However because `SCAN` has very little state associated (just the cursor) it has the following drawbacks: + +* A given element may be returned multiple times. It is up to the application to handle the case of duplicated elements, for example only using the returned elements in order to perform operations that are safe when re-applied multiple times. +* Elements that were not constantly present in the collection during a full iteration, may be returned or not: it is undefined. ## Number of elements returned at every SCAN call -`SCAN` family functions do not guarantee that the number of elements returned -per call are in a given range. The commands are also allowed to return zero -elements, and the client should not consider the iteration complete as long as -the returned cursor is not zero. +`SCAN` family functions do not guarantee that the number of elements returned per call are in a given range. The commands are also allowed to return zero elements, and the client should not consider the iteration complete as long as the returned cursor is not zero. 
-However the number of returned elements is reasonable, that is, in practical -terms SCAN may return a maximum number of elements in the order of a few tens of -elements when iterating a large collection, or may return all the elements of -the collection in a single call when the iterated collection is small enough to -be internally represented as an encoded data structure (this happens for small -sets, hashes and sorted sets). +However the number of returned elements is reasonable, that is, in practical terms SCAN may return a maximum number of elements in the order of a few tens of elements when iterating a large collection, or may return all the elements of the collection in a single call when the iterated collection is small enough to be internally represented as an encoded data structure (this happens for small sets, hashes and sorted sets). -However there is a way for the user to tune the order of magnitude of the number -of returned elements per call using the **COUNT** option. +However there is a way for the user to tune the order of magnitude of the number of returned elements per call using the **COUNT** option. ## The COUNT option -While `SCAN` does not provide guarantees about the number of elements returned -at every iteration, it is possible to empirically adjust the behavior of `SCAN` -using the **COUNT** option. Basically with COUNT the user specified the _amount -of work that should be done at every call in order to retrieve elements from the -collection_. This is **just a hint** for the implementation, however generally -speaking this is what you could expect most of the times from the -implementation. - -- The default COUNT value is 10. -- When iterating the key space, or a Set, Hash or Sorted Set that is big enough - to be represented by a hash table, assuming no **MATCH** option is used, the - server will usually return _count_ or a bit more than _count_ elements per - call. 
Please check the _why SCAN may return all the elements at once_ section - later in this document. -- When iterating Sets encoded as intsets (small sets composed of just integers), - or Hashes and Sorted Sets encoded as ziplists (small hashes and sets composed - of small individual values), usually all the elements are returned in the - first `SCAN` call regardless of the COUNT value. - -Important: **there is no need to use the same COUNT value** for every iteration. -The caller is free to change the count from one iteration to the other as -required, as long as the cursor passed in the next call is the one obtained in -the previous call to the command. +While `SCAN` does not provide guarantees about the number of elements returned at every iteration, it is possible to empirically adjust the behavior of `SCAN` using the **COUNT** option. Basically with COUNT the user specified the *amount of work that should be done at every call in order to retrieve elements from the collection*. This is **just a hint** for the implementation, however generally speaking this is what you could expect most of the times from the implementation. + +* The default COUNT value is 10. +* When iterating the key space, or a Set, Hash or Sorted Set that is big enough to be represented by a hash table, assuming no **MATCH** option is used, the server will usually return *count* or a bit more than *count* elements per call. Please check the *why SCAN may return all the elements at once* section later in this document. +* When iterating Sets encoded as intsets (small sets composed of just integers), or Hashes and Sorted Sets encoded as ziplists (small hashes and sets composed of small individual values), usually all the elements are returned in the first `SCAN` call regardless of the COUNT value. + +Important: **there is no need to use the same COUNT value** for every iteration. 
The caller is free to change the count from one iteration to the other as required, as long as the cursor passed in the next call is the one obtained in the previous call to the command. ## The MATCH option -It is possible to only iterate elements matching a given glob-style pattern, -similarly to the behavior of the `KEYS` command that takes a pattern as only -argument. +It is possible to only iterate elements matching a given glob-style pattern, similarly to the behavior of the `KEYS` command that takes a pattern as only argument. -To do so, just append the `MATCH <pattern>` arguments at the end of the `SCAN` -command (it works with all the SCAN family commands). +To do so, just append the `MATCH <pattern>` arguments at the end of the `SCAN` command (it works with all the SCAN family commands). This is an example of iteration using **MATCH**: @@ -163,11 +99,7 @@ redis 127.0.0.1:6379> sscan myset 0 match f* redis 127.0.0.1:6379> ``` -It is important to note that the **MATCH** filter is applied after elements are -retrieved from the collection, just before returning data to the client. This -means that if the pattern matches very little elements inside the collection, -`SCAN` will likely return no elements in most iterations. An example is shown -below: +It is important to note that the **MATCH** filter is applied after elements are retrieved from the collection, just before returning data to the client. This means that if the pattern matches very little elements inside the collection, `SCAN` will likely return no elements in most iterations. An example is shown below: ``` redis 127.0.0.1:6379> scan 0 MATCH *11* @@ -205,22 +137,14 @@ redis 127.0.0.1:6379> scan 176 MATCH *11* COUNT 1000 redis 127.0.0.1:6379> ``` -As you can see most of the calls returned zero elements, but the last call where -a COUNT of 1000 was used in order to force the command to do more scanning for -that iteration. 
+As you can see most of the calls returned zero elements, but the last call where a COUNT of 1000 was used in order to force the command to do more scanning for that iteration. + ## The TYPE option -As of version 6.0 you can use this option to ask `SCAN` to only return objects -that match a given `type`, allowing you to iterate through the database looking -for keys of a specific type. The **TYPE** option is only available on the -whole-database `SCAN`, not `HSCAN` or `ZSCAN` etc. +You can use the `!TYPE` option to ask `SCAN` to only return objects that match a given `type`, allowing you to iterate through the database looking for keys of a specific type. The **TYPE** option is only available on the whole-database `SCAN`, not `HSCAN` or `ZSCAN` etc. -The `type` argument is the same string name that the `TYPE` command returns. -Note a quirk where some Redis types, such as GeoHashes, HyperLogLogs, Bitmaps, -and Bitfields, may internally be implemented using other Redis types, such as a -string or zset, so can't be distinguished from other keys of that same type by -`SCAN`. For example, a ZSET and GEOHASH: +The `type` argument is the same string name that the `TYPE` command returns. Note a quirk where some Redis types, such as GeoHashes, HyperLogLogs, Bitmaps, and Bitfields, may internally be implemented using other Redis types, such as a string or zset, so can't be distinguished from other keys of that same type by `SCAN`. For example, a ZSET and GEOHASH: ``` redis 127.0.0.1:6379> GEOADD geokey 0 0 value @@ -237,93 +161,47 @@ redis 127.0.0.1:6379> SCAN 0 TYPE zset 2) "zkey" ``` -It is important to note that the **TYPE** filter is also applied after elements -are retrieved from the database, so the option does not reduce the amount of -work the server has to do to complete a full iteration, and for rare types you -may receive no elements in many iterations. 
+It is important to note that the **TYPE** filter is also applied after elements are retrieved from the database, so the option does not reduce the amount of work the server has to do to complete a full iteration, and for rare types you may receive no elements in many iterations. ## Multiple parallel iterations -It is possible for an infinite number of clients to iterate the same collection -at the same time, as the full state of the iterator is in the cursor, that is -obtained and returned to the client at every call. No server side state is taken -at all. +It is possible for an infinite number of clients to iterate the same collection at the same time, as the full state of the iterator is in the cursor, that is obtained and returned to the client at every call. No server side state is taken at all. ## Terminating iterations in the middle -Since there is no state server side, but the full state is captured by the -cursor, the caller is free to terminate an iteration half-way without signaling -this to the server in any way. An infinite number of iterations can be started -and never terminated without any issue. +Since there is no state server side, but the full state is captured by the cursor, the caller is free to terminate an iteration half-way without signaling this to the server in any way. An infinite number of iterations can be started and never terminated without any issue. ## Calling SCAN with a corrupted cursor -Calling `SCAN` with a broken, negative, out of range, or otherwise invalid -cursor, will result into undefined behavior but never into a crash. What will be -undefined is that the guarantees about the returned elements can no longer be -ensured by the `SCAN` implementation. +Calling `SCAN` with a broken, negative, out of range, or otherwise invalid cursor, will result into undefined behavior but never into a crash. What will be undefined is that the guarantees about the returned elements can no longer be ensured by the `SCAN` implementation. 
The only valid cursors to use are: -- The cursor value of 0 when starting an iteration. -- The cursor returned by the previous call to SCAN in order to continue the - iteration. +* The cursor value of 0 when starting an iteration. +* The cursor returned by the previous call to SCAN in order to continue the iteration. ## Guarantee of termination -The `SCAN` algorithm is guaranteed to terminate only if the size of the iterated -collection remains bounded to a given maximum size, otherwise iterating a -collection that always grows may result into `SCAN` to never terminate a full -iteration. +The `SCAN` algorithm is guaranteed to terminate only if the size of the iterated collection remains bounded to a given maximum size, otherwise iterating a collection that always grows may result into `SCAN` to never terminate a full iteration. -This is easy to see intuitively: if the collection grows there is more and more -work to do in order to visit all the possible elements, and the ability to -terminate the iteration depends on the number of calls to `SCAN` and its COUNT -option value compared with the rate at which the collection grows. +This is easy to see intuitively: if the collection grows there is more and more work to do in order to visit all the possible elements, and the ability to terminate the iteration depends on the number of calls to `SCAN` and its COUNT option value compared with the rate at which the collection grows. ## Why SCAN may return all the items of an aggregate data type in a single call? -In the `COUNT` option documentation, we state that sometimes this family of -commands may return all the elements of a Set, Hash or Sorted Set at once in a -single call, regardless of the `COUNT` option value. The reason why this happens -is that the cursor-based iterator can be implemented, and is useful, only when -the aggregate data type that we are scanning is represented as an hash table. 
-However Redis uses a [memory optimization](/topics/memory-optimization) where -small aggregate data types, until they reach a given amount of items or a given -max size of single elements, are represented using a compact single-allocation -packed encoding. When this is the case, `SCAN` has no meaningful cursor to -return, and must iterate the whole data structure at once, so the only sane -behavior it has is to return everything in a call. - -However once the data structures are bigger and are promoted to use real hash -tables, the `SCAN` family of commands will resort to the normal behavior. Note -that since this special behavior of returning all the elements is true only for -small aggregates, it has no effects on the command complexity or latency. -However the exact limits to get converted into real hash tables are -[user configurable](/topics/memory-optimization), so the maximum number of -elements you can see returned in a single call depends on how big an aggregate -data type could be and still use the packed representation. - -Also note that this behavior is specific of `SSCAN`, `HSCAN` and `ZSCAN`. `SCAN` -itself never shows this behavior because the key space is always represented by -hash tables. +In the `COUNT` option documentation, we state that sometimes this family of commands may return all the elements of a Set, Hash or Sorted Set at once in a single call, regardless of the `COUNT` option value. The reason why this happens is that the cursor-based iterator can be implemented, and is useful, only when the aggregate data type that we are scanning is represented as a hash table. However Redis uses a [memory optimization](/topics/memory-optimization) where small aggregate data types, until they reach a given amount of items or a given max size of single elements, are represented using a compact single-allocation packed encoding. 
When this is the case, `SCAN` has no meaningful cursor to return, and must iterate the whole data structure at once, so the only sane behavior it has is to return everything in a call. -## Return value +However once the data structures are bigger and are promoted to use real hash tables, the `SCAN` family of commands will resort to the normal behavior. Note that since this special behavior of returning all the elements is true only for small aggregates, it has no effects on the command complexity or latency. However the exact limits to get converted into real hash tables are [user configurable](/topics/memory-optimization), so the maximum number of elements you can see returned in a single call depends on how big an aggregate data type could be and still use the packed representation. -`SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` return a two elements multi-bulk reply, -where the first element is a string representing an unsigned 64 bit number (the -cursor), and the second element is a multi-bulk with an array of elements. +Also note that this behavior is specific of `SSCAN`, `HSCAN` and `ZSCAN`. `SCAN` itself never shows this behavior because the key space is always represented by hash tables. -- `SCAN` array of elements is a list of keys. -- `SSCAN` array of elements is a list of Set members. -- `HSCAN` array of elements contain two elements, a field and a value, for every - returned element of the Hash. -- `ZSCAN` array of elements contain two elements, a member and its associated - score, for every returned element of the sorted set. +## Return value -@history +`SCAN`, `SSCAN`, `HSCAN` and `ZSCAN` return a two elements multi-bulk reply, where the first element is a string representing an unsigned 64 bit number (the cursor), and the second element is a multi-bulk with an array of elements. -- `>= 6.0`: Supports the `TYPE` subcommand. +* `SCAN` array of elements is a list of keys. +* `SSCAN` array of elements is a list of Set members. 
+* `HSCAN` array of elements contain two elements, a field and a value, for every returned element of the Hash. +* `ZSCAN` array of elements contain two elements, a member and its associated score, for every returned element of the sorted set. ## Additional examples diff --git a/iredis/data/commands/script-debug.md b/iredis/data/commands/script-debug.md index 52f8194..3779ed5 100644 --- a/iredis/data/commands/script-debug.md +++ b/iredis/data/commands/script-debug.md @@ -15,12 +15,13 @@ finishes, so debugging can be restarted using the same initial state. The alternative synchronous debug mode blocks the server while the debugging session is active and retains all changes to the data set once it ends. -- `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are - discarded). -- `!SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes - to data). -- `NO`. Disables scripts debug mode. +* `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are discarded). +* `!SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to data). +* `NO`. Disables scripts debug mode. + +For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). @return @simple-string-reply: `OK`. + diff --git a/iredis/data/commands/script-exists.md b/iredis/data/commands/script-exists.md index d27d771..758660c 100644 --- a/iredis/data/commands/script-exists.md +++ b/iredis/data/commands/script-exists.md @@ -2,17 +2,17 @@ Returns information about the existence of the scripts in the script cache. This command accepts one or more SHA1 digests and returns a list of ones or zeros to signal if the scripts are already defined or not inside the script -cache. This can be useful before a pipelining operation to ensure that scripts -are loaded (and if not, to load them using `SCRIPT LOAD`) so that the pipelining +cache. 
+This can be useful before a pipelining operation to ensure that scripts are +loaded (and if not, to load them using `SCRIPT LOAD`) so that the pipelining operation can be performed solely using `EVALSHA` instead of `EVAL` to save bandwidth. -Please refer to the `EVAL` documentation for detailed information about Redis -Lua scripting. +For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). @return -@array-reply The command returns an array of integers that correspond to the -specified SHA1 digest arguments. For every corresponding SHA1 digest of a script -that actually exists in the script cache, an 1 is returned, otherwise 0 is -returned. +@array-reply The command returns an array of integers that correspond to +the specified SHA1 digest arguments. +For every corresponding SHA1 digest of a script that actually exists in the +script cache, a 1 is returned, otherwise 0 is returned. diff --git a/iredis/data/commands/script-flush.md b/iredis/data/commands/script-flush.md index bc5a545..705d014 100644 --- a/iredis/data/commands/script-flush.md +++ b/iredis/data/commands/script-flush.md @@ -1,23 +1,19 @@ Flush the Lua scripts cache. -Please refer to the `EVAL` documentation for detailed information about Redis -Lua scripting. +By default, `SCRIPT FLUSH` will synchronously flush the cache. +Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to "yes" changes the default flush mode to asynchronous. -By default, `SCRIPT FLUSH` will synchronously flush the cache. Starting with -Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to -"yes" changes the default flush mode to asynchronous. 
+It is possible to use one of the following modifiers to dictate the flushing mode explicitly: -It is possible to use one of the following modifiers to dictate the flushing -mode explicitly: +* `ASYNC`: flushes the cache asynchronously +* `!SYNC`: flushes the cache synchronously -- `ASYNC`: flushes the cache asynchronously -- `!SYNC`: flushes the cache synchronously +For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). @return @simple-string-reply -@history +## Behavior change history -- `>= 6.2.0`: Added the `ASYNC` and `!SYNC` flushing mode modifiers, as well as - the **lazyfree-lazy-user-flush** configuration directive. +* `>= 6.2.0`: Default flush behavior now configurable by the **lazyfree-lazy-user-flush** configuration directive.
\ No newline at end of file diff --git a/iredis/data/commands/script-help.md b/iredis/data/commands/script-help.md new file mode 100644 index 0000000..02b7163 --- /dev/null +++ b/iredis/data/commands/script-help.md @@ -0,0 +1,5 @@ +The `SCRIPT HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/script-kill.md b/iredis/data/commands/script-kill.md index 225798b..5b4c646 100644 --- a/iredis/data/commands/script-kill.md +++ b/iredis/data/commands/script-kill.md @@ -1,18 +1,18 @@ -Kills the currently executing Lua script, assuming no write operation was yet +Kills the currently executing `EVAL` script, assuming no write operation was yet performed by the script. This command is mainly useful to kill a script that is running for too much -time(for instance because it entered an infinite loop because of a bug). The -script will be killed and the client currently blocked into EVAL will see the -command returning with an error. +time (for instance, because it entered an infinite loop because of a bug). +The script will be killed, and the client currently blocked into EVAL will see +the command returning with an error. -If the script already performed write operations it can not be killed in this -way because it would violate Lua script atomicity contract. In such a case only -`SHUTDOWN NOSAVE` is able to kill the script, killing the Redis process in an -hard way preventing it to persist with half-written information. +If the script has already performed write operations, it can not be killed in this +way because it would violate Lua's script atomicity contract. +In such a case, only `SHUTDOWN NOSAVE` can kill the script, killing +the Redis process in a hard way and preventing it from persisting with half-written +information. -Please refer to the `EVAL` documentation for detailed information about Redis -Lua scripting.
+For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). @return diff --git a/iredis/data/commands/script-load.md b/iredis/data/commands/script-load.md index 839b247..ed5ab2d 100644 --- a/iredis/data/commands/script-load.md +++ b/iredis/data/commands/script-load.md @@ -1,18 +1,17 @@ -Load a script into the scripts cache, without executing it. After the specified -command is loaded into the script cache it will be callable using `EVALSHA` with -the correct SHA1 digest of the script, exactly like after the first successful -invocation of `EVAL`. +Load a script into the scripts cache, without executing it. +After the specified command is loaded into the script cache it will be callable +using `EVALSHA` with the correct SHA1 digest of the script, exactly like after +the first successful invocation of `EVAL`. -The script is guaranteed to stay in the script cache forever (unless -`SCRIPT FLUSH` is called). +The script is guaranteed to stay in the script cache forever (unless `SCRIPT +FLUSH` is called). The command works in the same way even if the script was already present in the script cache. -Please refer to the `EVAL` documentation for detailed information about Redis -Lua scripting. +For more information about `EVAL` scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). @return -@bulk-string-reply This command returns the SHA1 digest of the script added into -the script cache. +@bulk-string-reply This command returns the SHA1 digest of the script added into the +script cache. diff --git a/iredis/data/commands/script.md b/iredis/data/commands/script.md new file mode 100644 index 0000000..a7a41d8 --- /dev/null +++ b/iredis/data/commands/script.md @@ -0,0 +1,3 @@ +This is a container command for script management commands. + +To see the list of available commands you can call `SCRIPT HELP`. 
diff --git a/iredis/data/commands/select.md b/iredis/data/commands/select.md index ff366c6..9ebc04e 100644 --- a/iredis/data/commands/select.md +++ b/iredis/data/commands/select.md @@ -1,26 +1,13 @@ Select the Redis logical database having the specified zero-based numeric index. New connections always use the database 0. -Selectable Redis databases are a form of namespacing: all databases are still -persisted in the same RDB / AOF file. However different databases can have keys -with the same name, and commands like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` work on -specific databases. +Selectable Redis databases are a form of namespacing: all databases are still persisted in the same RDB / AOF file. However different databases can have keys with the same name, and commands like `FLUSHDB`, `SWAPDB` or `RANDOMKEY` work on specific databases. -In practical terms, Redis databases should be used to separate different keys -belonging to the same application (if needed), and not to use a single Redis -instance for multiple unrelated applications. +In practical terms, Redis databases should be used to separate different keys belonging to the same application (if needed), and not to use a single Redis instance for multiple unrelated applications. -When using Redis Cluster, the `SELECT` command cannot be used, since Redis -Cluster only supports database zero. In the case of a Redis Cluster, having -multiple databases would be useless and an unnecessary source of complexity. -Commands operating atomically on a single database would not be possible with -the Redis Cluster design and goals. +When using Redis Cluster, the `SELECT` command cannot be used, since Redis Cluster only supports database zero. In the case of a Redis Cluster, having multiple databases would be useless and an unnecessary source of complexity. Commands operating atomically on a single database would not be possible with the Redis Cluster design and goals. 
-Since the currently selected database is a property of the connection, clients -should track the currently selected database and re-select it on reconnection. -While there is no command in order to query the selected database in the current -connection, the `CLIENT LIST` output shows, for each client, the currently -selected database. +Since the currently selected database is a property of the connection, clients should track the currently selected database and re-select it on reconnection. While there is no command in order to query the selected database in the current connection, the `CLIENT LIST` output shows, for each client, the currently selected database. @return diff --git a/iredis/data/commands/set.md b/iredis/data/commands/set.md index 8f5c302..6f1ceca 100644 --- a/iredis/data/commands/set.md +++ b/iredis/data/commands/set.md @@ -1,49 +1,34 @@ -Set `key` to hold the string `value`. If `key` already holds a value, it is -overwritten, regardless of its type. Any previous time to live associated with -the key is discarded on successful `SET` operation. +Set `key` to hold the string `value`. +If `key` already holds a value, it is overwritten, regardless of its type. +Any previous time to live associated with the key is discarded on successful `SET` operation. ## Options The `SET` command supports a set of options that modify its behavior: -- `EX` _seconds_ -- Set the specified expire time, in seconds. -- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds. -- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key - will expire, in seconds. -- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the - key will expire, in milliseconds. -- `NX` -- Only set the key if it does not already exist. -- `XX` -- Only set the key if it already exist. -- `KEEPTTL` -- Retain the time to live associated with the key. -- `GET` -- Return the old string stored at key, or nil if key did not exist. 
An - error is returned and `SET` aborted if the value stored at key is not a - string. - -Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, -`GETSET`, it is possible that in future versions of Redis these commands will be -deprecated and finally removed. +* `EX` *seconds* -- Set the specified expire time, in seconds. +* `PX` *milliseconds* -- Set the specified expire time, in milliseconds. +* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. +* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. +* `NX` -- Only set the key if it does not already exist. +* `XX` -- Only set the key if it already exists. +* `KEEPTTL` -- Retain the time to live associated with the key. +* `!GET` -- Return the old string stored at key, or nil if key did not exist. An error is returned and `SET` aborted if the value stored at key is not a string. + +Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, `GETSET`, it is possible that in future versions of Redis these commands will be deprecated and finally removed. @return @simple-string-reply: `OK` if `SET` was executed correctly. -@nil-reply: `(nil)` if the `SET` operation was not performed because the user -specified the `NX` or `XX` option but the condition was not met. +@nil-reply: `(nil)` if the `SET` operation was not performed because the user specified the `NX` or `XX` option but the condition was not met. -If the command is issued with the `GET` option, the above does not apply. It -will instead reply as follows, regardless if the `SET` was actually performed: +If the command is issued with the `!GET` option, the above does not apply. It will instead reply as follows, regardless if the `SET` was actually performed: @bulk-string-reply: the old string value stored at key. @nil-reply: `(nil)` if the key did not exist.
-@history - -- `>= 2.6.12`: Added the `EX`, `PX`, `NX` and `XX` options. -- `>= 6.0`: Added the `KEEPTTL` option. -- `>= 6.2`: Added the `GET`, `EXAT` and `PXAT` option. -- `>= 7.0`: Allowed the `NX` and `GET` options to be used together. - @examples ```cli @@ -55,28 +40,20 @@ SET anotherkey "will expire in a minute" EX 60 ## Patterns -**Note:** The following pattern is discouraged in favor of -[the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit -more complex to implement, but offers better guarantees and is fault tolerant. +**Note:** The following pattern is discouraged in favor of [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. -The command `SET resource-name anystring NX EX max-lock-time` is a simple way to -implement a locking system with Redis. +The command `SET resource-name anystring NX EX max-lock-time` is a simple way to implement a locking system with Redis. -A client can acquire the lock if the above command returns `OK` (or retry after -some time if the command returns Nil), and remove the lock just using `DEL`. +A client can acquire the lock if the above command returns `OK` (or retry after some time if the command returns Nil), and remove the lock just using `DEL`. The lock will be auto-released after the expire time is reached. -It is possible to make this system more robust modifying the unlock schema as -follows: +It is possible to make this system more robust modifying the unlock schema as follows: -- Instead of setting a fixed string, set a non-guessable large random string, - called token. -- Instead of releasing the lock with `DEL`, send a script that only removes the - key if the value matches. +* Instead of setting a fixed string, set a non-guessable large random string, called token. +* Instead of releasing the lock with `DEL`, send a script that only removes the key if the value matches. 
-This avoids that a client will try to release the lock after the expire time -deleting the key created by another client that acquired the lock later. +This avoids that a client will try to release the lock after the expire time deleting the key created by another client that acquired the lock later. An example of unlock script would be similar to the following: diff --git a/iredis/data/commands/setbit.md b/iredis/data/commands/setbit.md index 6f3a553..e0b440b 100644 --- a/iredis/data/commands/setbit.md +++ b/iredis/data/commands/setbit.md @@ -1,23 +1,24 @@ Sets or clears the bit at _offset_ in the string value stored at _key_. The bit is either set or cleared depending on _value_, which can be either 0 or - 1. -When _key_ does not exist, a new string value is created. The string is grown to -make sure it can hold a bit at _offset_. The _offset_ argument is required to be -greater than or equal to 0, and smaller than 2^32 (this limits bitmaps to -512MB). When the string at _key_ is grown, added bits are set to 0. +When _key_ does not exist, a new string value is created. +The string is grown to make sure it can hold a bit at _offset_. +The _offset_ argument is required to be greater than or equal to 0, and smaller +than 2^32 (this limits bitmaps to 512MB). +When the string at _key_ is grown, added bits are set to 0. **Warning**: When setting the last possible bit (_offset_ equal to 2^32 -1) and the string value stored at _key_ does not yet hold a string value, or holds a small string value, Redis needs to allocate all intermediate memory which can -block the server for some time. On a 2010 MacBook Pro, setting bit number 2^32 --1 (512MB allocation) takes ~300ms, setting bit number 2^30 -1 (128MB -allocation) takes ~80ms, setting bit number 2^28 -1 (32MB allocation) takes -~30ms and setting bit number 2^26 -1 (8MB allocation) takes ~8ms. 
Note that once -this first allocation is done, subsequent calls to `SETBIT` for the same _key_ -will not have the allocation overhead. +block the server for some time. +On a 2010 MacBook Pro, setting bit number 2^32 -1 (512MB allocation) takes +~300ms, setting bit number 2^30 -1 (128MB allocation) takes ~80ms, setting bit +number 2^28 -1 (32MB allocation) takes ~30ms and setting bit number 2^26 -1 (8MB +allocation) takes ~8ms. +Note that once this first allocation is done, subsequent calls to `SETBIT` for +the same _key_ will not have the allocation overhead. @return @@ -40,13 +41,14 @@ be set. However, so as an optimization you can use a single `SET` command to set the entire bitmap. Bitmaps are not an actual data type, but a set of bit-oriented operations -defined on the String type (for more information refer to the [Bitmaps section -of the Data Types Introduction page][ti]). This means that bitmaps can be used -with string commands, and most importantly with `SET` and `GET`. +defined on the String type (for more information refer to the +[Bitmaps section of the Data Types Introduction page][ti]). This means that +bitmaps can be used with string commands, and most importantly with `SET` and +`GET`. Because Redis' strings are binary-safe, a bitmap is trivially encoded as a bytes -stream. The first byte of the string corresponds to offsets 0..7 of the bitmap, -the second byte to the 8..15 range, and so forth. +stream. The first byte of the string corresponds to offsets 0..7 of +the bitmap, the second byte to the 8..15 range, and so forth. For example, after setting a few bits, getting the string value of the bitmap would look like this: diff --git a/iredis/data/commands/setex.md b/iredis/data/commands/setex.md index 6181b73..8d8b6b3 100644 --- a/iredis/data/commands/setex.md +++ b/iredis/data/commands/setex.md @@ -1,6 +1,6 @@ Set `key` to hold the string `value` and set `key` to timeout after a given -number of seconds. 
This command is equivalent to executing the following -commands: +number of seconds. +This command is equivalent to executing the following commands: ``` SET mykey value @@ -8,9 +8,9 @@ EXPIRE mykey seconds ``` `SETEX` is atomic, and can be reproduced by using the previous two commands -inside an `MULTI` / `EXEC` block. It is provided as a faster alternative to the -given sequence of operations, because this operation is very common when Redis -is used as a cache. +inside a `MULTI` / `EXEC` block. +It is provided as a faster alternative to the given sequence of operations, +because this operation is very common when Redis is used as a cache. An error is returned when `seconds` is invalid. diff --git a/iredis/data/commands/setnx.md b/iredis/data/commands/setnx.md index acd77df..833573c 100644 --- a/iredis/data/commands/setnx.md +++ b/iredis/data/commands/setnx.md @@ -1,13 +1,14 @@ -Set `key` to hold string `value` if `key` does not exist. In that case, it is -equal to `SET`. When `key` already holds a value, no operation is performed. +Set `key` to hold string `value` if `key` does not exist. +In that case, it is equal to `SET`. +When `key` already holds a value, no operation is performed. `SETNX` is short for "**SET** if **N**ot e**X**ists". @return @integer-reply, specifically: -- `1` if the key was set -- `0` if the key was not set +* `1` if the key was set +* `0` if the key was not set @examples @@ -21,82 +22,78 @@ GET mykey **Please note that:** -1. The following pattern is discouraged in favor of - [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit - more complex to implement, but offers better guarantees and is fault - tolerant. -2. We document the old pattern anyway because certain existing implementations - link to this page as a reference. Moreover it is an interesting example of - how Redis commands can be used in order to mount programming primitives. -3. 
Anyway even assuming a single-instance locking primitive, starting with - 2.6.12 it is possible to create a much simpler locking primitive, equivalent - to the one discussed here, using the `SET` command to acquire the lock, and a - simple Lua script to release the lock. The pattern is documented in the `SET` - command page. - -That said, `SETNX` can be used, and was historically used, as a locking -primitive. For example, to acquire the lock of the key `foo`, the client could -try the following: +1. The following pattern is discouraged in favor of [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. +2. We document the old pattern anyway because certain existing implementations link to this page as a reference. Moreover it is an interesting example of how Redis commands can be used in order to mount programming primitives. +3. Anyway even assuming a single-instance locking primitive, starting with 2.6.12 it is possible to create a much simpler locking primitive, equivalent to the one discussed here, using the `SET` command to acquire the lock, and a simple Lua script to release the lock. The pattern is documented in the `SET` command page. + +That said, `SETNX` can be used, and was historically used, as a locking primitive. For example, to acquire the lock of the key `foo`, the client could try the +following: ``` SETNX lock.foo <current Unix time + lock timeout + 1> ``` If `SETNX` returns `1` the client acquired the lock, setting the `lock.foo` key -to the Unix time at which the lock should no longer be considered valid. The -client will later use `DEL lock.foo` in order to release the lock. +to the Unix time at which the lock should no longer be considered valid. +The client will later use `DEL lock.foo` in order to release the lock. -If `SETNX` returns `0` the key is already locked by some other client. 
We can -either return to the caller if it's a non blocking lock, or enter a loop +If `SETNX` returns `0` the key is already locked by some other client. +We can either return to the caller if it's a non blocking lock, or enter a loop retrying to hold the lock until we succeed or some kind of timeout expires. ### Handling deadlocks In the above locking algorithm there is a problem: what happens if a client -fails, crashes, or is otherwise not able to release the lock? It's possible to -detect this condition because the lock key contains a UNIX timestamp. If such a -timestamp is equal to the current Unix time the lock is no longer valid. +fails, crashes, or is otherwise not able to release the lock? +It's possible to detect this condition because the lock key contains a UNIX +timestamp. +If such a timestamp is equal to the current Unix time the lock is no longer +valid. When this happens we can't just call `DEL` against the key to remove the lock and then try to issue a `SETNX`, as there is a race condition here, when multiple clients detected an expired lock and are trying to release it. -- C1 and C2 read `lock.foo` to check the timestamp, because they both received +* C1 and C2 read `lock.foo` to check the timestamp, because they both received `0` after executing `SETNX`, as the lock is still held by C3 that crashed after holding the lock. -- C1 sends `DEL lock.foo` -- C1 sends `SETNX lock.foo` and it succeeds -- C2 sends `DEL lock.foo` -- C2 sends `SETNX lock.foo` and it succeeds -- **ERROR**: both C1 and C2 acquired the lock because of the race condition. +* C1 sends `DEL lock.foo` +* C1 sends `SETNX lock.foo` and it succeeds +* C2 sends `DEL lock.foo` +* C2 sends `SETNX lock.foo` and it succeeds +* **ERROR**: both C1 and C2 acquired the lock because of the race condition. Fortunately, it's possible to avoid this issue using the following algorithm. 
Let's see how C4, our sane client, uses the good algorithm: -- C4 sends `SETNX lock.foo` in order to acquire the lock +* C4 sends `SETNX lock.foo` in order to acquire the lock -- The crashed client C3 still holds it, so Redis will reply with `0` to C4. +* The crashed client C3 still holds it, so Redis will reply with `0` to C4. -- C4 sends `GET lock.foo` to check if the lock expired. If it is not, it will - sleep for some time and retry from the start. +* C4 sends `GET lock.foo` to check if the lock expired. + If it is not, it will sleep for some time and retry from the start. -- Instead, if the lock is expired because the Unix time at `lock.foo` is older - than the current Unix time, C4 tries to perform: +* Instead, if the lock is expired because the Unix time at `lock.foo` is older + than the current Unix time, C4 tries to perform: - ``` - GETSET lock.foo <current Unix timestamp + lock timeout + 1> - ``` + ``` + GETSET lock.foo <current Unix timestamp + lock timeout + 1> + ``` -- Because of the `GETSET` semantic, C4 can check if the old value stored at - `key` is still an expired timestamp. If it is, the lock was acquired. +* Because of the `GETSET` semantic, C4 can check if the old value stored at + `key` is still an expired timestamp. + If it is, the lock was acquired. -- If another client, for instance C5, was faster than C4 and acquired the lock - with the `GETSET` operation, the C4 `GETSET` operation will return a non - expired timestamp. C4 will simply restart from the first step. Note that even - if C4 set the key a bit a few seconds in the future this is not a problem. +* If another client, for instance C5, was faster than C4 and acquired the lock + with the `GETSET` operation, the C4 `GETSET` operation will return a non + expired timestamp. + C4 will simply restart from the first step. + Note that even if C4 set the key a bit a few seconds in the future this is + not a problem. 
-In order to make this locking algorithm more robust, a client holding a lock -should always check the timeout didn't expire before unlocking the key with -`DEL` because client failures can be complex, not just crashing but also -blocking a lot of time against some operations and trying to issue `DEL` after a -lot of time (when the LOCK is already held by another client). +In order to make this locking algorithm more robust, a +client holding a lock should always check the timeout didn't expire before +unlocking the key with `DEL` because client failures can be complex, not just +crashing but also blocking a lot of time against some operations and trying +to issue `DEL` after a lot of time (when the LOCK is already held by another +client). diff --git a/iredis/data/commands/setrange.md b/iredis/data/commands/setrange.md index 078fb34..617e3d5 100644 --- a/iredis/data/commands/setrange.md +++ b/iredis/data/commands/setrange.md @@ -1,29 +1,30 @@ Overwrites part of the string stored at _key_, starting at the specified offset, -for the entire length of _value_. If the offset is larger than the current -length of the string at _key_, the string is padded with zero-bytes to make -_offset_ fit. Non-existing keys are considered as empty strings, so this command -will make sure it holds a string large enough to be able to set _value_ at -_offset_. +for the entire length of _value_. +If the offset is larger than the current length of the string at _key_, the +string is padded with zero-bytes to make _offset_ fit. +Non-existing keys are considered as empty strings, so this command will make +sure it holds a string large enough to be able to set _value_ at _offset_. Note that the maximum offset that you can set is 2^29 -1 (536870911), as Redis -Strings are limited to 512 megabytes. If you need to grow beyond this size, you -can use multiple keys. +Strings are limited to 512 megabytes. +If you need to grow beyond this size, you can use multiple keys. 
**Warning**: When setting the last possible byte and the string value stored at _key_ does not yet hold a string value, or holds a small string value, Redis needs to allocate all intermediate memory which can block the server for some -time. On a 2010 MacBook Pro, setting byte number 536870911 (512MB allocation) -takes ~300ms, setting byte number 134217728 (128MB allocation) takes ~80ms, -setting bit number 33554432 (32MB allocation) takes ~30ms and setting bit number -8388608 (8MB allocation) takes ~8ms. Note that once this first allocation is -done, subsequent calls to `SETRANGE` for the same _key_ will not have the -allocation overhead. +time. +On a 2010 MacBook Pro, setting byte number 536870911 (512MB allocation) takes +~300ms, setting byte number 134217728 (128MB allocation) takes ~80ms, setting +bit number 33554432 (32MB allocation) takes ~30ms and setting bit number 8388608 +(8MB allocation) takes ~8ms. +Note that once this first allocation is done, subsequent calls to `SETRANGE` for +the same _key_ will not have the allocation overhead. ## Patterns Thanks to `SETRANGE` and the analogous `GETRANGE` commands, you can use Redis -strings as a linear array with O(1) random access. This is a very fast and -efficient storage in many real world use cases. +strings as a linear array with O(1) random access. +This is a very fast and efficient storage in many real world use cases. @return diff --git a/iredis/data/commands/shutdown.md b/iredis/data/commands/shutdown.md index cd48260..5dca6de 100644 --- a/iredis/data/commands/shutdown.md +++ b/iredis/data/commands/shutdown.md @@ -1,62 +1,73 @@ The command behavior is the following: -- Stop all the clients. -- Perform a blocking SAVE if at least one **save point** is configured. -- Flush the Append Only File if AOF is enabled. -- Quit the server. +* If there are any replicas lagging behind in replication: + * Pause clients attempting to write by performing a `CLIENT PAUSE` with the `WRITE` option. 
+ * Wait up to the configured `shutdown-timeout` (default 10 seconds) for replicas to catch up the replication offset. +* Stop all the clients. +* Perform a blocking SAVE if at least one **save point** is configured. +* Flush the Append Only File if AOF is enabled. +* Quit the server. If persistence is enabled this commands makes sure that Redis is switched off -without the lost of any data. This is not guaranteed if the client uses simply -`SAVE` and then `QUIT` because other clients may alter the DB data between the -two commands. +without any data loss. Note: A Redis instance that is configured for not persisting on disk (no AOF configured, nor "save" directive) will not dump the RDB file on `SHUTDOWN`, as usually you don't want Redis instances used only for caching to block on when shutting down. -## SAVE and NOSAVE modifiers +Also note: If Redis receives one of the signals `SIGTERM` and `SIGINT`, the same shutdown sequence is performed. +See also [Signal Handling](/topics/signals). -It is possible to specify an optional modifier to alter the behavior of the -command. Specifically: +## Modifiers -- **SHUTDOWN SAVE** will force a DB saving operation even if no save points are - configured. -- **SHUTDOWN NOSAVE** will prevent a DB saving operation even if one or more - save points are configured. (You can think of this variant as an hypothetical - **ABORT** command that just stops the server). +It is possible to specify optional modifiers to alter the behavior of the command. +Specifically: + +* **SAVE** will force a DB saving operation even if no save points are configured. +* **NOSAVE** will prevent a DB saving operation even if one or more save points are configured. +* **NOW** skips waiting for lagging replicas, i.e. it bypasses the first step in the shutdown sequence. +* **FORCE** ignores any errors that would normally prevent the server from exiting. + For details, see the following section. 
+* **ABORT** cancels an ongoing shutdown and cannot be combined with other flags. ## Conditions where a SHUTDOWN fails -When the Append Only File is enabled the shutdown may fail because the system is -in a state that does not allow to safely immediately persist on disk. - -Normally if there is an AOF child process performing an AOF rewrite, Redis will -simply kill it and exit. However there are two conditions where it is unsafe to -do so, and the **SHUTDOWN** command will be refused with an error instead. This -happens when: - -- The user just turned on AOF, and the server triggered the first AOF rewrite in - order to create the initial AOF file. In this context, stopping will result in - losing the dataset at all: once restarted, the server will potentially have - AOF enabled without having any AOF file at all. -- A replica with AOF enabled, reconnected with its master, performed a full - resynchronization, and restarted the AOF file, triggering the initial AOF - creation process. In this case not completing the AOF rewrite is dangerous - because the latest dataset received from the master would be lost. The new - master can actually be even a different instance (if the **REPLICAOF** or - **SLAVEOF** command was used in order to reconfigure the replica), so it is - important to finish the AOF rewrite and start with the correct data set - representing the data set in memory when the server was terminated. - -There are conditions when we want just to terminate a Redis instance ASAP, -regardless of what its content is. In such a case, the right combination of -commands is to send a **CONFIG appendonly no** followed by a **SHUTDOWN -NOSAVE**. The first command will turn off the AOF if needed, and will terminate -the AOF rewriting child if there is one active. The second command will not have -any problem to execute since the AOF is no longer enabled. 
+When a save point is configured or the **SAVE** modifier is specified, the shutdown may fail if the RDB file can't be saved. +Then, the server continues to run in order to ensure no data loss. +This may be bypassed using the **FORCE** modifier, causing the server to exit anyway. + +When the Append Only File is enabled the shutdown may fail because the +system is in a state that does not allow to safely immediately persist +on disk. + +Normally if there is an AOF child process performing an AOF rewrite, Redis +will simply kill it and exit. +However, there are situations where it is unsafe to do so and, unless the **FORCE** modifier is specified, the **SHUTDOWN** command will be refused with an error instead. +This happens in the following situations: + +* The user just turned on AOF, and the server triggered the first AOF rewrite in order to create the initial AOF file. In this context, stopping will result in losing the dataset at all: once restarted, the server will potentially have AOF enabled without having any AOF file at all. +* A replica with AOF enabled, reconnected with its master, performed a full resynchronization, and restarted the AOF file, triggering the initial AOF creation process. In this case not completing the AOF rewrite is dangerous because the latest dataset received from the master would be lost. The new master can actually be even a different instance (if the **REPLICAOF** or **SLAVEOF** command was used in order to reconfigure the replica), so it is important to finish the AOF rewrite and start with the correct data set representing the data set in memory when the server was terminated. + +There are situations when we want just to terminate a Redis instance ASAP, regardless of what its content is. +In such a case, the command **SHUTDOWN NOW NOSAVE FORCE** can be used. 
+In versions before 7.0, where the **NOW** and **FORCE** flags are not available, the right combination of commands is to send a **CONFIG appendonly no** followed by a **SHUTDOWN NOSAVE**. +The first command will turn off the AOF if needed, and will terminate the AOF rewriting child if there is one active. +The second command will not have any problem to execute since the AOF is no longer enabled. + +## Minimize the risk of data loss + +Since Redis 7.0, the server waits for lagging replicas up to a configurable `shutdown-timeout`, by default 10 seconds, before shutting down. +This provides a best effort minimizing the risk of data loss in a situation where no save points are configured and AOF is disabled. +Before version 7.0, shutting down a heavily loaded master node in a diskless setup was more likely to result in data loss. +To minimize the risk of data loss in such setups, it's advised to trigger a manual `FAILOVER` (or `CLUSTER FAILOVER`) to demote the master to a replica and promote one of the replicas to be the new master, before shutting down a master node. @return -@simple-string-reply on error. On success nothing is returned since the server -quits and the connection is closed. +@simple-string-reply: `OK` if `ABORT` was specified and shutdown was aborted. +On successful shutdown, nothing is returned since the server quits and the connection is closed. +On failure, an error is returned. + +## Behavior change history + +* `>= 7.0.0`: Introduced waiting for lagging replicas before exiting.
\ No newline at end of file diff --git a/iredis/data/commands/sinter.md b/iredis/data/commands/sinter.md index e4ab023..465b3d7 100644 --- a/iredis/data/commands/sinter.md +++ b/iredis/data/commands/sinter.md @@ -10,9 +10,9 @@ key3 = {a,c,e} SINTER key1 key2 key3 = {c} ``` -Keys that do not exist are considered to be empty sets. With one of the keys -being an empty set, the resulting set is also empty (since set intersection with -an empty set always results in an empty set). +Keys that do not exist are considered to be empty sets. +With one of the keys being an empty set, the resulting set is also empty (since +set intersection with an empty set always results in an empty set). @return diff --git a/iredis/data/commands/sintercard.md b/iredis/data/commands/sintercard.md new file mode 100644 index 0000000..24473e5 --- /dev/null +++ b/iredis/data/commands/sintercard.md @@ -0,0 +1,28 @@ +This command is similar to `SINTER`, but instead of returning the result set, it returns just the cardinality of the result. +Returns the cardinality of the set which would result from the intersection of all the given sets. + +Keys that do not exist are considered to be empty sets. +With one of the keys being an empty set, the resulting set is also empty (since set intersection with an empty set always results in an empty set). + +By default, the command calculates the cardinality of the intersection of all given sets. +When provided with the optional `LIMIT` argument (which defaults to 0 and means unlimited), if the intersection cardinality reaches limit partway through the computation, the algorithm will exit and yield limit as the cardinality. +Such implementation ensures a significant speedup for queries where the limit is lower than the actual intersection cardinality. + +@return + +@integer-reply: the number of elements in the resulting intersection. 
+ +@examples + +```cli +SADD key1 "a" +SADD key1 "b" +SADD key1 "c" +SADD key1 "d" +SADD key2 "c" +SADD key2 "d" +SADD key2 "e" +SINTER key1 key2 +SINTERCARD 2 key1 key2 +SINTERCARD 2 key1 key2 LIMIT 1 +``` diff --git a/iredis/data/commands/sismember.md b/iredis/data/commands/sismember.md index 051f87d..219cd6e 100644 --- a/iredis/data/commands/sismember.md +++ b/iredis/data/commands/sismember.md @@ -4,8 +4,8 @@ Returns if `member` is a member of the set stored at `key`. @integer-reply, specifically: -- `1` if the element is a member of the set. -- `0` if the element is not a member of the set, or if `key` does not exist. +* `1` if the element is a member of the set. +* `0` if the element is not a member of the set, or if `key` does not exist. @examples diff --git a/iredis/data/commands/slaveof.md b/iredis/data/commands/slaveof.md index 1250d43..34b9574 100644 --- a/iredis/data/commands/slaveof.md +++ b/iredis/data/commands/slaveof.md @@ -1,24 +1,21 @@ -**A note about the word slave used in this man page and command name**: Starting -with Redis 5 this command: starting with Redis version 5, if not for backward -compatibility, the Redis project no longer uses the word slave. Please use the -new command `REPLICAOF`. The command `SLAVEOF` will continue to work for -backward compatibility. +**A note about the word slave used in this man page and command name**: starting with Redis version 5, if not for backward compatibility, the Redis project no longer uses the word slave. Please use the new command `REPLICAOF`. The command `SLAVEOF` will continue to work for backward compatibility. -The `SLAVEOF` command can change the replication settings of a replica on the -fly. If a Redis server is already acting as replica, the command `SLAVEOF` NO -ONE will turn off the replication, turning the Redis server into a MASTER. 
In -the proper form `SLAVEOF` hostname port will make the server a replica of +The `SLAVEOF` command can change the replication settings of a replica on the fly. +If a Redis server is already acting as replica, the command `SLAVEOF` NO ONE will +turn off the replication, turning the Redis server into a MASTER. +In the proper form `SLAVEOF` hostname port will make the server a replica of another server listening at the specified hostname and port. -If a server is already a replica of some master, `SLAVEOF` hostname port will -stop the replication against the old server and start the synchronization -against the new one, discarding the old dataset. +If a server is already a replica of some master, `SLAVEOF` hostname port will stop +the replication against the old server and start the synchronization against the +new one, discarding the old dataset. The form `SLAVEOF` NO ONE will stop replication, turning the server into a -MASTER, but will not discard the replication. So, if the old master stops -working, it is possible to turn the replica into a master and set the -application to use this new master in read/write. Later when the other Redis -server is fixed, it can be reconfigured to work as a replica. +MASTER, but will not discard the replication. +So, if the old master stops working, it is possible to turn the replica into a +master and set the application to use this new master in read/write. +Later when the other Redis server is fixed, it can be reconfigured to work as a +replica. @return diff --git a/iredis/data/commands/slowlog-get.md b/iredis/data/commands/slowlog-get.md new file mode 100644 index 0000000..d496e39 --- /dev/null +++ b/iredis/data/commands/slowlog-get.md @@ -0,0 +1,26 @@ +The `SLOWLOG GET` command returns entries from the slow log in chronological order. + +The Redis Slow Log is a system to log queries that exceeded a specified execution time. 
+The execution time does not include I/O operations like talking with the client, sending the reply and so forth, but just the time needed to actually execute the command (this is the only stage of command execution where the thread is blocked and can not serve other requests in the meantime). + +A new entry is added to the slow log whenever a command exceeds the execution time threshold defined by the `slowlog-log-slower-than` configuration directive. +The maximum number of entries in the slow log is governed by the `slowlog-max-len` configuration directive. + +By default the command returns all of the entries in the log. The optional `count` argument limits the number of returned entries, so the command returns at most up to `count` entries. + +Each entry from the slow log is comprised of the following six values: + +1. A unique progressive identifier for every slow log entry. +2. The unix timestamp at which the logged command was processed. +3. The amount of time needed for its execution, in microseconds. +4. The array composing the arguments of the command. +5. Client IP address and port. +6. Client name if set via the `CLIENT SETNAME` command. + +The entry's unique ID can be used in order to avoid processing slow log entries multiple times (for instance you may have a script sending you an email alert for every new slow log entry). +The ID is never reset in the course of the Redis server execution, only a server +restart will reset it. + +@reply + +@array-reply: a list of slow log entries. diff --git a/iredis/data/commands/slowlog-help.md b/iredis/data/commands/slowlog-help.md new file mode 100644 index 0000000..a70f3a5 --- /dev/null +++ b/iredis/data/commands/slowlog-help.md @@ -0,0 +1,5 @@ +The `SLOWLOG HELP` command returns a helpful text describing the different subcommands. 
+ +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/slowlog-len.md b/iredis/data/commands/slowlog-len.md new file mode 100644 index 0000000..6f0d977 --- /dev/null +++ b/iredis/data/commands/slowlog-len.md @@ -0,0 +1,12 @@ +This command returns the current number of entries in the slow log. + +A new entry is added to the slow log whenever a command exceeds the execution time threshold defined by the `slowlog-log-slower-than` configuration directive. +The maximum number of entries in the slow log is governed by the `slowlog-max-len` configuration directive. +Once the slow log reaches its maximal size, the oldest entry is removed whenever a new entry is created. +The slow log can be cleared with the `SLOWLOG RESET` command. + +@reply + +@integer-reply + +The number of entries in the slow log. diff --git a/iredis/data/commands/slowlog-reset.md b/iredis/data/commands/slowlog-reset.md new file mode 100644 index 0000000..b522c26 --- /dev/null +++ b/iredis/data/commands/slowlog-reset.md @@ -0,0 +1,7 @@ +This command resets the slow log, clearing all entries in it. + +Once deleted the information is lost forever. + +@reply + +@simple-string-reply: `OK` diff --git a/iredis/data/commands/slowlog.md b/iredis/data/commands/slowlog.md index 258c122..26e5bb7 100644 --- a/iredis/data/commands/slowlog.md +++ b/iredis/data/commands/slowlog.md @@ -1,86 +1,3 @@ -This command is used in order to read and reset the Redis slow queries log. +This is a container command for slow log management commands. -## Redis slow log overview - -The Redis Slow Log is a system to log queries that exceeded a specified -execution time. The execution time does not include I/O operations like talking -with the client, sending the reply and so forth, but just the time needed to -actually execute the command (this is the only stage of command execution where -the thread is blocked and can not serve other requests in the meantime). 
- -You can configure the slow log with two parameters: _slowlog-log-slower-than_ -tells Redis what is the execution time, in microseconds, to exceed in order for -the command to get logged. Note that a negative number disables the slow log, -while a value of zero forces the logging of every command. _slowlog-max-len_ is -the length of the slow log. The minimum value is zero. When a new command is -logged and the slow log is already at its maximum length, the oldest one is -removed from the queue of logged commands in order to make space. - -The configuration can be done by editing `redis.conf` or while the server is -running using the `CONFIG GET` and `CONFIG SET` commands. - -## Reading the slow log - -The slow log is accumulated in memory, so no file is written with information -about the slow command executions. This makes the slow log remarkably fast at -the point that you can enable the logging of all the commands (setting the -_slowlog-log-slower-than_ config parameter to zero) with minor performance hit. - -To read the slow log the **SLOWLOG GET** command is used, that returns every -entry in the slow log. It is possible to return only the N most recent entries -passing an additional argument to the command (for instance **SLOWLOG GET 10**). -The default requested length is 10 (when the argument is omitted). It's possible -to pass -1 to get the entire slowlog. - -Note that you need a recent version of redis-cli in order to read the slow log -output, since it uses some features of the protocol that were not formerly -implemented in redis-cli (deeply nested multi bulk replies). 
- -## Output format - -``` -redis 127.0.0.1:6379> slowlog get 2 -1) 1) (integer) 14 - 2) (integer) 1309448221 - 3) (integer) 15 - 4) 1) "ping" -2) 1) (integer) 13 - 2) (integer) 1309448128 - 3) (integer) 30 - 4) 1) "slowlog" - 2) "get" - 3) "100" -``` - -There are also optional fields emitted only by Redis 4.0 or greater: - -``` -5) "127.0.0.1:58217" -6) "worker-123" -``` - -Every entry is composed of four (or six starting with Redis 4.0) fields: - -- A unique progressive identifier for every slow log entry. -- The unix timestamp at which the logged command was processed. -- The amount of time needed for its execution, in microseconds. -- The array composing the arguments of the command. -- Client IP address and port (4.0 only). -- Client name if set via the `CLIENT SETNAME` command (4.0 only). - -The entry's unique ID can be used in order to avoid processing slow log entries -multiple times (for instance you may have a script sending you an email alert -for every new slow log entry). - -The ID is never reset in the course of the Redis server execution, only a server -restart will reset it. - -## Obtaining the current length of the slow log - -It is possible to get just the length of the slow log using the command -**SLOWLOG LEN**. - -## Resetting the slow log. - -You can reset the slow log using the **SLOWLOG RESET** command. Once deleted the -information is lost forever. +To see the list of available commands you can call `SLOWLOG HELP`. diff --git a/iredis/data/commands/smismember.md b/iredis/data/commands/smismember.md index 6d66eeb..c4cec64 100644 --- a/iredis/data/commands/smismember.md +++ b/iredis/data/commands/smismember.md @@ -1,12 +1,11 @@ Returns whether each `member` is a member of the set stored at `key`. -For every `member`, `1` is returned if the value is a member of the set, or `0` -if the element is not a member of the set or if `key` does not exist. 
+For every `member`, `1` is returned if the value is a member of the set, or `0` if the element is not a member of the set or if `key` does not exist. @return -@array-reply: list representing the membership of the given elements, in the -same order as they are requested. +@array-reply: list representing the membership of the given elements, in the same +order as they are requested. @examples diff --git a/iredis/data/commands/smove.md b/iredis/data/commands/smove.md index d8c12fa..6b2400b 100644 --- a/iredis/data/commands/smove.md +++ b/iredis/data/commands/smove.md @@ -1,11 +1,14 @@ -Move `member` from the set at `source` to the set at `destination`. This -operation is atomic. In every given moment the element will appear to be a -member of `source` **or** `destination` for other clients. +Move `member` from the set at `source` to the set at `destination`. +This operation is atomic. +In every given moment the element will appear to be a member of `source` **or** +`destination` for other clients. If the source set does not exist or does not contain the specified element, no -operation is performed and `0` is returned. Otherwise, the element is removed -from the source set and added to the destination set. When the specified element -already exists in the destination set, it is only removed from the source set. +operation is performed and `0` is returned. +Otherwise, the element is removed from the source set and added to the +destination set. +When the specified element already exists in the destination set, it is only +removed from the source set. An error is returned if `source` or `destination` does not hold a set value. @@ -13,8 +16,8 @@ An error is returned if `source` or `destination` does not hold a set value. @integer-reply, specifically: -- `1` if the element is moved. -- `0` if the element is not a member of `source` and no operation was performed. +* `1` if the element is moved. 
+* `0` if the element is not a member of `source` and no operation was performed. @examples diff --git a/iredis/data/commands/sort.md b/iredis/data/commands/sort.md index 0703b5e..2a091db 100644 --- a/iredis/data/commands/sort.md +++ b/iredis/data/commands/sort.md @@ -1,6 +1,10 @@ Returns or stores the elements contained in the [list][tdtl], [set][tdts] or -[sorted set][tdtss] at `key`. By default, sorting is numeric and elements are -compared by their value interpreted as double precision floating point number. +[sorted set][tdtss] at `key`. + +There is also the `SORT_RO` read-only variant of this command. + +By default, sorting is numeric and elements are compared by their value +interpreted as double precision floating point number. This is `SORT` in its simplest form: [tdtl]: /topics/data-types#lists @@ -12,8 +16,8 @@ SORT mylist ``` Assuming `mylist` is a list of numbers, this command will return the same list -with the elements sorted from small to large. In order to sort the numbers from -large to small, use the `!DESC` modifier: +with the elements sorted from small to large. +In order to sort the numbers from large to small, use the `!DESC` modifier: ``` SORT mylist DESC @@ -29,18 +33,20 @@ SORT mylist ALPHA Redis is UTF-8 aware, assuming you correctly set the `!LC_COLLATE` environment variable. -The number of returned elements can be limited using the `!LIMIT` modifier. This -modifier takes the `offset` argument, specifying the number of elements to skip -and the `count` argument, specifying the number of elements to return from -starting at `offset`. The following example will return 10 elements of the -sorted version of `mylist`, starting at element 0 (`offset` is zero-based): +The number of returned elements can be limited using the `!LIMIT` modifier. +This modifier takes the `offset` argument, specifying the number of elements to +skip and the `count` argument, specifying the number of elements to return from +starting at `offset`. 
+The following example will return 10 elements of the sorted version of `mylist`, +starting at element 0 (`offset` is zero-based): ``` SORT mylist LIMIT 0 10 ``` -Almost all modifiers can be used together. The following example will return the -first 5 elements, lexicographically sorted in descending order: +Almost all modifiers can be used together. +The following example will return the first 5 elements, lexicographically sorted +in descending order: ``` SORT mylist LIMIT 0 5 ALPHA DESC @@ -49,27 +55,28 @@ SORT mylist LIMIT 0 5 ALPHA DESC ## Sorting by external keys Sometimes you want to sort elements using external keys as weights to compare -instead of comparing the actual elements in the list, set or sorted set. Let's -say the list `mylist` contains the elements `1`, `2` and `3` representing unique -IDs of objects stored in `object_1`, `object_2` and `object_3`. When these -objects have associated weights stored in `weight_1`, `weight_2` and `weight_3`, -`SORT` can be instructed to use these weights to sort `mylist` with the -following statement: +instead of comparing the actual elements in the list, set or sorted set. +Let's say the list `mylist` contains the elements `1`, `2` and `3` representing +unique IDs of objects stored in `object_1`, `object_2` and `object_3`. +When these objects have associated weights stored in `weight_1`, `weight_2` and +`weight_3`, `SORT` can be instructed to use these weights to sort `mylist` with +the following statement: ``` SORT mylist BY weight_* ``` The `BY` option takes a pattern (equal to `weight_*` in this example) that is -used to generate the keys that are used for sorting. These key names are -obtained substituting the first occurrence of `*` with the actual value of the -element in the list (`1`, `2` and `3` in this example). +used to generate the keys that are used for sorting. 
+These key names are obtained substituting the first occurrence of `*` with the +actual value of the element in the list (`1`, `2` and `3` in this example). ## Skip sorting the elements The `!BY` option can also take a non-existent key, which causes `SORT` to skip -the sorting operation. This is useful if you want to retrieve external keys (see -the `!GET` option below) without the overhead of sorting. +the sorting operation. +This is useful if you want to retrieve external keys (see the `!GET` option +below) without the overhead of sorting. ``` SORT mylist BY nosort @@ -77,10 +84,11 @@ SORT mylist BY nosort ## Retrieving external keys -Our previous example returns just the sorted IDs. In some cases, it is more -useful to get the actual objects instead of their IDs (`object_1`, `object_2` -and `object_3`). Retrieving external keys based on the elements in a list, set -or sorted set can be done with the following command: +Our previous example returns just the sorted IDs. +In some cases, it is more useful to get the actual objects instead of their IDs +(`object_1`, `object_2` and `object_3`). +Retrieving external keys based on the elements in a list, set or sorted set can +be done with the following command: ``` SORT mylist BY weight_* GET object_* @@ -95,11 +103,21 @@ It is also possible to `!GET` the element itself using the special pattern `#`: SORT mylist BY weight_* GET object_* GET # ``` +## Restrictions for using external keys + +When enabling `Redis cluster-mode` there is no way to guarantee the existence of the external keys on the node which the command is processed on. +In this case, any use of `GET` or `BY` which reference external key pattern will cause the command to fail with an error. + +Starting from Redis 7.0, any use of `GET` or `BY` which reference external key pattern will only be allowed in case the current user running the command has full key read permissions. 
+Full key read permissions can be set for the user by, for example, specifying `'%R~*'` or `'~*` with the relevant command access rules. +You can check the `ACL SETUSER` command manual for more information on setting ACL access rules. +If full key read permissions aren't set, the command will fail with an error. + ## Storing the result of a SORT operation -By default, `SORT` returns the sorted elements to the client. With the `!STORE` -option, the result will be stored as a list at the specified key instead of -being returned to the client. +By default, `SORT` returns the sorted elements to the client. +With the `!STORE` option, the result will be stored as a list at the specified +key instead of being returned to the client. ``` SORT mylist BY weight_* STORE resultkey @@ -107,14 +125,15 @@ SORT mylist BY weight_* STORE resultkey An interesting pattern using `SORT ... STORE` consists in associating an `EXPIRE` timeout to the resulting key so that in applications where the result -of a `SORT` operation can be cached for some time. Other clients will use the -cached list instead of calling `SORT` for every request. When the key will -timeout, an updated version of the cache can be created by calling -`SORT ... STORE` again. +of a `SORT` operation can be cached for some time. +Other clients will use the cached list instead of calling `SORT` for every +request. +When the key will timeout, an updated version of the cache can be created by +calling `SORT ... STORE` again. Note that for correctly implementing this pattern it is important to avoid -multiple clients rebuilding the cache at the same time. Some kind of locking is -needed here (for instance using `SETNX`). +multiple clients rebuilding the cache at the same time. +Some kind of locking is needed here (for instance using `SETNX`). 
## Using hashes in `!BY` and `!GET` @@ -125,12 +144,11 @@ following syntax: SORT mylist BY weight_*->fieldname GET object_*->fieldname ``` -The string `->` is used to separate the key name from the hash field name. The -key is substituted as documented above, and the hash stored at the resulting key -is accessed to retrieve the specified hash field. +The string `->` is used to separate the key name from the hash field name. +The key is substituted as documented above, and the hash stored at the resulting +key is accessed to retrieve the specified hash field. @return -@array-reply: without passing the `store` option the command returns a list of -sorted elements. @integer-reply: when the `store` option is specified the -command returns the number of sorted elements in the destination list. +@array-reply: without passing the `store` option the command returns a list of sorted elements. +@integer-reply: when the `store` option is specified the command returns the number of sorted elements in the destination list. diff --git a/iredis/data/commands/sort_ro.md b/iredis/data/commands/sort_ro.md new file mode 100644 index 0000000..66223a3 --- /dev/null +++ b/iredis/data/commands/sort_ro.md @@ -0,0 +1,17 @@ +Read-only variant of the `SORT` command. It is exactly like the original `SORT` but refuses the `STORE` option and can safely be used in read-only replicas. + +Since the original `SORT` has a `STORE` option it is technically flagged as a writing command in the Redis command table. For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the `READONLY` command of Redis Cluster). + +The `SORT_RO` variant was introduced in order to allow `SORT` behavior in read-only replicas without breaking compatibility on command flags. + +See original `SORT` for more details. 
+ +@examples + +``` +SORT_RO mylist BY weight_*->fieldname GET object_*->fieldname +``` + +@return + +@array-reply: a list of sorted elements. diff --git a/iredis/data/commands/spop.md b/iredis/data/commands/spop.md index 057a025..8c86a9a 100644 --- a/iredis/data/commands/spop.md +++ b/iredis/data/commands/spop.md @@ -1,8 +1,6 @@ -Removes and returns one or more random members from the set value store at -`key`. +Removes and returns one or more random members from the set value store at `key`. -This operation is similar to `SRANDMEMBER`, that returns one or more random -elements from a set but does not remove it. +This operation is similar to `SRANDMEMBER`, that returns one or more random elements from a set but does not remove it. By default, the command pops a single member from the set. When provided with the optional `count` argument, the reply will consist of up to `count` members, @@ -18,10 +16,6 @@ When called with the `count` argument: @array-reply: the removed members, or an empty array when `key` does not exist. -@history - -- `>= 3.2`: Added the `count` argument. - @examples ```cli @@ -35,9 +29,6 @@ SADD myset "five" SPOP myset 3 SMEMBERS myset ``` - ## Distribution of returned elements -Note that this command is not suitable when you need a guaranteed uniform -distribution of the returned elements. For more information about the algorithms -used for `SPOP`, look up both the Knuth sampling and Floyd sampling algorithms. +Note that this command is not suitable when you need a guaranteed uniform distribution of the returned elements. For more information about the algorithms used for `SPOP`, look up both the Knuth sampling and Floyd sampling algorithms. diff --git a/iredis/data/commands/spublish.md b/iredis/data/commands/spublish.md new file mode 100644 index 0000000..e8b6925 --- /dev/null +++ b/iredis/data/commands/spublish.md @@ -0,0 +1,20 @@ +Posts a message to the given shard channel. 
+ +In Redis Cluster, shard channels are assigned to slots by the same algorithm used to assign keys to slots. +A shard message must be sent to a node that owns the slot the shard channel is hashed to. +The cluster makes sure that published shard messages are forwarded to all the nodes in the shard, so clients can subscribe to a shard channel by connecting to any one of the nodes in the shard. + +For more information about sharded pubsub, see [Sharded Pubsub](/topics/pubsub#sharded-pubsub). + +@return + +@integer-reply: the number of clients that received the message. + +@examples + +For example the following command publishes to channel `orders` with a subscriber already waiting for message(s). + +``` +> spublish orders hello +(integer) 1 +``` diff --git a/iredis/data/commands/srandmember.md b/iredis/data/commands/srandmember.md index 640ccdc..dd2d4a8 100644 --- a/iredis/data/commands/srandmember.md +++ b/iredis/data/commands/srandmember.md @@ -1,22 +1,16 @@ -When called with just the `key` argument, return a random element from the set -value stored at `key`. +When called with just the `key` argument, return a random element from the set value stored at `key`. -If the provided `count` argument is positive, return an array of **distinct -elements**. The array's length is either `count` or the set's cardinality -(`SCARD`), whichever is lower. +If the provided `count` argument is positive, return an array of **distinct elements**. +The array's length is either `count` or the set's cardinality (`SCARD`), whichever is lower. -If called with a negative `count`, the behavior changes and the command is -allowed to return the **same element multiple times**. In this case, the number -of returned elements is the absolute value of the specified `count`. 
@return -@bulk-string-reply: without the additional `count` argument, the command returns -a Bulk Reply with the randomly selected element, or `nil` when `key` does not -exist. +@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. -@array-reply: when the additional `count` argument is passed, the command -returns an array of elements, or an empty array when `key` does not exist. +@array-reply: when the additional `count` argument is passed, the command returns an array of elements, or an empty array when `key` does not exist. @examples @@ -27,42 +21,26 @@ SRANDMEMBER myset 2 SRANDMEMBER myset -5 ``` -@history - -- `>= 2.6.0`: Added the optional `count` argument. - ## Specification of the behavior when count is passed When the `count` argument is a positive value this command behaves as follows: -- No repeated elements are returned. -- If `count` is bigger than the set's cardinality, the command will only return - the whole set without additional elements. -- The order of elements in the reply is not truly random, so it is up to the - client to shuffle them if needed. +* No repeated elements are returned. +* If `count` is bigger than the set's cardinality, the command will only return the whole set without additional elements. +* The order of elements in the reply is not truly random, so it is up to the client to shuffle them if needed. When the `count` is a negative value, the behavior changes as follows: -- Repeating elements are possible. -- Exactly `count` elements, or an empty array if the set is empty (non-existing - key), are always returned. -- The order of elements in the reply is truly random. +* Repeating elements are possible. +* Exactly `count` elements, or an empty array if the set is empty (non-existing key), are always returned. +* The order of elements in the reply is truly random. 
## Distribution of returned elements -Note: this section is relevant only for Redis 5 or below, as Redis 6 implements -a fairer algorithm. +Note: this section is relevant only for Redis 5 or below, as Redis 6 implements a fairer algorithm. -The distribution of the returned elements is far from perfect when the number of -elements in the set is small, this is due to the fact that we used an -approximated random element function that does not really guarantees good -distribution. +The distribution of the returned elements is far from perfect when the number of elements in the set is small, this is due to the fact that we used an approximated random element function that does not really guarantees good distribution. -The algorithm used, that is implemented inside dict.c, samples the hash table -buckets to find a non-empty one. Once a non empty bucket is found, since we use -chaining in our hash table implementation, the number of elements inside the -bucket is checked and a random element is selected. +The algorithm used, that is implemented inside dict.c, samples the hash table buckets to find a non-empty one. Once a non empty bucket is found, since we use chaining in our hash table implementation, the number of elements inside the bucket is checked and a random element is selected. -This means that if you have two non-empty buckets in the entire hash table, and -one has three elements while one has just one, the element that is alone in its -bucket will be returned with much higher probability. +This means that if you have two non-empty buckets in the entire hash table, and one has three elements while one has just one, the element that is alone in its bucket will be returned with much higher probability. diff --git a/iredis/data/commands/srem.md b/iredis/data/commands/srem.md index 6ead535..fca5b75 100644 --- a/iredis/data/commands/srem.md +++ b/iredis/data/commands/srem.md @@ -1,6 +1,7 @@ -Remove the specified members from the set stored at `key`. 
Specified members
-that are not a member of this set are ignored. If `key` does not exist, it is
-treated as an empty set and this command returns `0`.
+Remove the specified members from the set stored at `key`.
+Specified members that are not a member of this set are ignored.
+If `key` does not exist, it is treated as an empty set and this command returns
+`0`.

An error is returned when the value stored at `key` is not a set.

@@ -9,11 +10,6 @@ An error is returned when the value stored at `key` is not a set.

@integer-reply: the number of members that were removed from the set, not
including non existing members.

-@history
-
-- `>= 2.4`: Accepts multiple `member` arguments. Redis versions older than 2.4
-  can only remove a set member per call.
-
@examples

```cli
diff --git a/iredis/data/commands/ssubscribe.md b/iredis/data/commands/ssubscribe.md
new file mode 100644
index 0000000..bf7d30e
--- /dev/null
+++ b/iredis/data/commands/ssubscribe.md
@@ -0,0 +1,21 @@
+Subscribes the client to the specified shard channels.
+
+In a Redis cluster, shard channels are assigned to slots by the same algorithm used to assign keys to slots.
+Client(s) can subscribe to a node covering a slot (primary/replica) to receive the messages published.
+All the specified shard channels need to belong to a single slot to subscribe in a given `SSUBSCRIBE` call.
+A client can subscribe to channels across different slots over separate `SSUBSCRIBE` calls.
+
+For more information about sharded Pub/Sub, see [Sharded Pub/Sub](/topics/pubsub#sharded-pubsub).
+
+@examples
+
+```
+> ssubscribe orders
+Reading messages... (press Ctrl-C to quit)
+1) "ssubscribe"
+2) "orders"
+3) (integer) 1
+1) "smessage"
+2) "orders"
+3) "hello"
+```
diff --git a/iredis/data/commands/strlen.md b/iredis/data/commands/strlen.md
index 99a9c55..e504180 100644
--- a/iredis/data/commands/strlen.md
+++ b/iredis/data/commands/strlen.md
@@ -1,5 +1,5 @@
-Returns the length of the string value stored at `key`.
An error is returned -when `key` holds a non-string value. +Returns the length of the string value stored at `key`. +An error is returned when `key` holds a non-string value. @return diff --git a/iredis/data/commands/subscribe.md b/iredis/data/commands/subscribe.md index 2715c63..bbc7127 100644 --- a/iredis/data/commands/subscribe.md +++ b/iredis/data/commands/subscribe.md @@ -1,9 +1,9 @@ Subscribes the client to the specified channels. Once the client enters the subscribed state it is not supposed to issue any -other commands, except for additional `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, +other commands, except for additional `SUBSCRIBE`, `SSUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`, `SUNSUBSCRIBE`, `PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands. -@history +## Behavior change history -- `>= 6.2`: `RESET` can be called to exit subscribed state. +* `>= 6.2.0`: `RESET` can be called to exit subscribed state.
\ No newline at end of file diff --git a/iredis/data/commands/substr.md b/iredis/data/commands/substr.md new file mode 100644 index 0000000..7283def --- /dev/null +++ b/iredis/data/commands/substr.md @@ -0,0 +1,22 @@ +Returns the substring of the string value stored at `key`, determined by the +offsets `start` and `end` (both are inclusive). +Negative offsets can be used in order to provide an offset starting from the end +of the string. +So -1 means the last character, -2 the penultimate and so forth. + +The function handles out of range requests by limiting the resulting range to +the actual length of the string. + +@return + +@bulk-string-reply + +@examples + +```cli +SET mykey "This is a string" +GETRANGE mykey 0 3 +GETRANGE mykey -3 -1 +GETRANGE mykey 0 -1 +GETRANGE mykey 10 100 +``` diff --git a/iredis/data/commands/sunsubscribe.md b/iredis/data/commands/sunsubscribe.md new file mode 100644 index 0000000..7ce76c3 --- /dev/null +++ b/iredis/data/commands/sunsubscribe.md @@ -0,0 +1,8 @@ +Unsubscribes the client from the given shard channels, or from all of them if none is given. + +When no shard channels are specified, the client is unsubscribed from all the previously subscribed shard channels. +In this case a message for every unsubscribed shard channel will be sent to the client. + +Note: The global channels and shard channels needs to be unsubscribed from separately. + +For more information about sharded Pub/Sub, see [Sharded Pub/Sub](/topics/pubsub#sharded-pubsub). diff --git a/iredis/data/commands/swapdb.md b/iredis/data/commands/swapdb.md index 708096a..ead2db0 100644 --- a/iredis/data/commands/swapdb.md +++ b/iredis/data/commands/swapdb.md @@ -1,12 +1,10 @@ -This command swaps two Redis databases, so that immediately all the clients -connected to a given database will see the data of the other database, and the -other way around. 
Example: +This command swaps two Redis databases, so that immediately all the +clients connected to a given database will see the data of the other database, and +the other way around. Example: SWAPDB 0 1 -This will swap database 0 with database 1. All the clients connected with -database 0 will immediately see the new data, exactly like all the clients -connected with database 1 will see the data that was formerly of database 0. +This will swap database 0 with database 1. All the clients connected with database 0 will immediately see the new data, exactly like all the clients connected with database 1 will see the data that was formerly of database 0. @return diff --git a/iredis/data/commands/sync.md b/iredis/data/commands/sync.md index 48250b4..cb95847 100644 --- a/iredis/data/commands/sync.md +++ b/iredis/data/commands/sync.md @@ -2,14 +2,13 @@ Initiates a replication stream from the master. The `SYNC` command is called by Redis replicas for initiating a replication stream from the master. It has been replaced in newer versions of Redis by -`PSYNC`. + `PSYNC`. -For more information about replication in Redis please check the [replication -page][tr]. +For more information about replication in Redis please check the +[replication page][tr]. [tr]: /topics/replication @return -**Non standard return value**, a bulk transfer of the data followed by `PING` -and write requests from the master. +**Non standard return value**, a bulk transfer of the data followed by `PING` and write requests from the master. diff --git a/iredis/data/commands/time.md b/iredis/data/commands/time.md index 441376e..2cf1af6 100644 --- a/iredis/data/commands/time.md +++ b/iredis/data/commands/time.md @@ -9,8 +9,8 @@ call. A multi bulk reply containing two elements: -- unix time in seconds. -- microseconds. +* unix time in seconds. +* microseconds. 
@examples diff --git a/iredis/data/commands/touch.md b/iredis/data/commands/touch.md index eee3365..a369354 100644 --- a/iredis/data/commands/touch.md +++ b/iredis/data/commands/touch.md @@ -1,4 +1,5 @@ -Alters the last access time of a key(s). A key is ignored if it does not exist. +Alters the last access time of a key(s). +A key is ignored if it does not exist. @return diff --git a/iredis/data/commands/ttl.md b/iredis/data/commands/ttl.md index c36557c..15821e1 100644 --- a/iredis/data/commands/ttl.md +++ b/iredis/data/commands/ttl.md @@ -1,22 +1,19 @@ -Returns the remaining time to live of a key that has a timeout. This -introspection capability allows a Redis client to check how many seconds a given -key will continue to be part of the dataset. +Returns the remaining time to live of a key that has a timeout. +This introspection capability allows a Redis client to check how many seconds a +given key will continue to be part of the dataset. -In Redis 2.6 or older the command returns `-1` if the key does not exist or if -the key exist but has no associated expire. +In Redis 2.6 or older the command returns `-1` if the key does not exist or if the key exist but has no associated expire. Starting with Redis 2.8 the return value in case of error changed: -- The command returns `-2` if the key does not exist. -- The command returns `-1` if the key exists but has no associated expire. +* The command returns `-2` if the key does not exist. +* The command returns `-1` if the key exists but has no associated expire. -See also the `PTTL` command that returns the same information with milliseconds -resolution (Only available in Redis 2.6 or greater). +See also the `PTTL` command that returns the same information with milliseconds resolution (Only available in Redis 2.6 or greater). @return -@integer-reply: TTL in seconds, or a negative value in order to signal an error -(see the description above). 
+@integer-reply: TTL in seconds, or a negative value in order to signal an error (see the description above). @examples diff --git a/iredis/data/commands/type.md b/iredis/data/commands/type.md index d27a7e8..8a818e0 100644 --- a/iredis/data/commands/type.md +++ b/iredis/data/commands/type.md @@ -1,5 +1,5 @@ -Returns the string representation of the type of the value stored at `key`. The -different types that can be returned are: `string`, `list`, `set`, `zset`, +Returns the string representation of the type of the value stored at `key`. +The different types that can be returned are: `string`, `list`, `set`, `zset`, `hash` and `stream`. @return diff --git a/iredis/data/commands/unlink.md b/iredis/data/commands/unlink.md index e305440..c91dd66 100644 --- a/iredis/data/commands/unlink.md +++ b/iredis/data/commands/unlink.md @@ -1,9 +1,9 @@ -This command is very similar to `DEL`: it removes the specified keys. Just like -`DEL` a key is ignored if it does not exist. However the command performs the -actual memory reclaiming in a different thread, so it is not blocking, while -`DEL` is. This is where the command name comes from: the command just -**unlinks** the keys from the keyspace. The actual removal will happen later -asynchronously. +This command is very similar to `DEL`: it removes the specified keys. +Just like `DEL` a key is ignored if it does not exist. However the command +performs the actual memory reclaiming in a different thread, so it is not +blocking, while `DEL` is. This is where the command name comes from: the +command just **unlinks** the keys from the keyspace. The actual removal +will happen later asynchronously. @return diff --git a/iredis/data/commands/unsubscribe.md b/iredis/data/commands/unsubscribe.md index 78c4d0c..7bdf1d1 100644 --- a/iredis/data/commands/unsubscribe.md +++ b/iredis/data/commands/unsubscribe.md @@ -2,5 +2,6 @@ Unsubscribes the client from the given channels, or from all of them if none is given. 
When no channels are specified, the client is unsubscribed from all the -previously subscribed channels. In this case, a message for every unsubscribed -channel will be sent to the client. +previously subscribed channels. +In this case, a message for every unsubscribed channel will be sent to the +client. diff --git a/iredis/data/commands/wait.md b/iredis/data/commands/wait.md index e5a179c..d3636ae 100644 --- a/iredis/data/commands/wait.md +++ b/iredis/data/commands/wait.md @@ -1,61 +1,45 @@ -This command blocks the current client until all the previous write commands are -successfully transferred and acknowledged by at least the specified number of -replicas. If the timeout, specified in milliseconds, is reached, the command +This command blocks the current client until all the previous write commands +are successfully transferred and acknowledged by at least the specified number +of replicas. If the timeout, specified in milliseconds, is reached, the command returns even if the specified number of replicas were not yet reached. -The command **will always return** the number of replicas that acknowledged the -write commands sent before the `WAIT` command, both in the case where the -specified number of replicas are reached, or when the timeout is reached. +The command **will always return** the number of replicas that acknowledged +the write commands sent before the `WAIT` command, both in the case where +the specified number of replicas are reached, or when the timeout is reached. A few remarks: -1. When `WAIT` returns, all the previous write commands sent in the context of - the current connection are guaranteed to be received by the number of - replicas returned by `WAIT`. -2. If the command is sent as part of a `MULTI` transaction, the command does not - block but instead just return ASAP the number of replicas that acknowledged - the previous write commands. +1. 
When `WAIT` returns, all the previous write commands sent in the context of the current connection are guaranteed to be received by the number of replicas returned by `WAIT`. +2. If the command is sent as part of a `MULTI` transaction, the command does not block but instead just return ASAP the number of replicas that acknowledged the previous write commands. 3. A timeout of 0 means to block forever. -4. Since `WAIT` returns the number of replicas reached both in case of failure - and success, the client should check that the returned value is equal or - greater to the replication level it demanded. +4. Since `WAIT` returns the number of replicas reached both in case of failure and success, the client should check that the returned value is equal or greater to the replication level it demanded. -## Consistency and WAIT +Consistency and WAIT +--- -Note that `WAIT` does not make Redis a strongly consistent store: while -synchronous replication is part of a replicated state machine, it is not the -only thing needed. However in the context of Sentinel or Redis Cluster failover, -`WAIT` improves the real world data safety. +Note that `WAIT` does not make Redis a strongly consistent store: while synchronous replication is part of a replicated state machine, it is not the only thing needed. However in the context of Sentinel or Redis Cluster failover, `WAIT` improves the real world data safety. -Specifically if a given write is transferred to one or more replicas, it is more -likely (but not guaranteed) that if the master fails, we'll be able to promote, -during a failover, a replica that received the write: both Sentinel and Redis -Cluster will do a best-effort attempt to promote the best replica among the set -of available replicas. 
+Specifically if a given write is transferred to one or more replicas, it is more likely (but not guaranteed) that if the master fails, we'll be able to promote, during a failover, a replica that received the write: both Sentinel and Redis Cluster will do a best-effort attempt to promote the best replica among the set of available replicas. -However this is just a best-effort attempt so it is possible to still lose a -write synchronously replicated to multiple replicas. +However this is just a best-effort attempt so it is possible to still lose a write synchronously replicated to multiple replicas. -## Implementation details +Implementation details +--- -Since the introduction of partial resynchronization with replicas (PSYNC -feature) Redis replicas asynchronously ping their master with the offset they -already processed in the replication stream. This is used in multiple ways: +Since the introduction of partial resynchronization with replicas (PSYNC feature) Redis replicas asynchronously ping their master with the offset they already processed in the replication stream. This is used in multiple ways: 1. Detect timed out replicas. 2. Perform a partial resynchronization after a disconnection. 3. Implement `WAIT`. -In the specific case of the implementation of `WAIT`, Redis remembers, for each -client, the replication offset of the produced replication stream when a given +In the specific case of the implementation of `WAIT`, Redis remembers, for each client, the replication offset of the produced replication stream when a given write command was executed in the context of a given client. When `WAIT` is called Redis checks if the specified number of replicas already acknowledged this offset or a greater one. @return -@integer-reply: The command returns the number of replicas reached by all the -writes performed in the context of the current connection. 
+@integer-reply: The command returns the number of replicas reached by all the writes performed in the context of the current connection. @examples @@ -68,8 +52,4 @@ OK (integer) 1 ``` -In the following example the first call to `WAIT` does not use a timeout and -asks for the write to reach 1 replica. It returns with success. In the second -attempt instead we put a timeout, and ask for the replication of the write to -two replicas. Since there is a single replica available, after one second `WAIT` -unblocks and returns 1, the number of replicas reached. +In the following example the first call to `WAIT` does not use a timeout and asks for the write to reach 1 replica. It returns with success. In the second attempt instead we put a timeout, and ask for the replication of the write to two replicas. Since there is a single replica available, after one second `WAIT` unblocks and returns 1, the number of replicas reached. diff --git a/iredis/data/commands/xack.md b/iredis/data/commands/xack.md index eb7ab83..aae2db5 100644 --- a/iredis/data/commands/xack.md +++ b/iredis/data/commands/xack.md @@ -1,25 +1,27 @@ -The `XACK` command removes one or multiple messages from the _Pending Entries -List_ (PEL) of a stream consumer group. A message is pending, and as such stored -inside the PEL, when it was delivered to some consumer, normally as a side -effect of calling `XREADGROUP`, or when a consumer took ownership of a message -calling `XCLAIM`. The pending message was delivered to some consumer but the -server is yet not sure it was processed at least once. So new calls to -`XREADGROUP` to grab the messages history for a consumer (for instance using an -ID of 0), will return such message. Similarly the pending message will be listed -by the `XPENDING` command, that inspects the PEL. +The `XACK` command removes one or multiple messages from the +*Pending Entries List* (PEL) of a stream consumer group. 
A message is pending, +and as such stored inside the PEL, when it was delivered to some consumer, +normally as a side effect of calling `XREADGROUP`, or when a consumer took +ownership of a message calling `XCLAIM`. The pending message was delivered to +some consumer but the server is yet not sure it was processed at least once. +So new calls to `XREADGROUP` to grab the messages history for a consumer +(for instance using an ID of 0), will return such message. +Similarly the pending message will be listed by the `XPENDING` command, +that inspects the PEL. -Once a consumer _successfully_ processes a message, it should call `XACK` so -that such message does not get processed again, and as a side effect, the PEL -entry about this message is also purged, releasing memory from the Redis server. +Once a consumer *successfully* processes a message, it should call `XACK` +so that such message does not get processed again, and as a side effect, +the PEL entry about this message is also purged, releasing memory from the +Redis server. @return @integer-reply, specifically: -The command returns the number of messages successfully acknowledged. Certain -message IDs may no longer be part of the PEL (for example because they have -already been acknowledged), and XACK will not count them as successfully -acknowledged. +The command returns the number of messages successfully acknowledged. +Certain message IDs may no longer be part of the PEL (for example because +they have already been acknowledged), and XACK will not count them as +successfully acknowledged. @examples diff --git a/iredis/data/commands/xadd.md b/iredis/data/commands/xadd.md index 8a80556..d651a68 100644 --- a/iredis/data/commands/xadd.md +++ b/iredis/data/commands/xadd.md @@ -1,67 +1,71 @@ -Appends the specified stream entry to the stream at the specified key. If the -key does not exist, as a side effect of running this command the key is created -with a stream value. 
The creation of stream's key can be disabled with the -`NOMKSTREAM` option. +Appends the specified stream entry to the stream at the specified key. +If the key does not exist, as a side effect of running this command the +key is created with a stream value. The creation of stream's key can be +disabled with the `NOMKSTREAM` option. -An entry is composed of a set of field-value pairs, it is basically a small -dictionary. The field-value pairs are stored in the same order they are given by -the user, and commands to read the stream such as `XRANGE` or `XREAD` are -guaranteed to return the fields and values exactly in the same order they were -added by `XADD`. +An entry is composed of a list of field-value pairs. +The field-value pairs are stored in the same order they are given by the user. +Commands that read the stream, such as `XRANGE` or `XREAD`, are guaranteed to return the fields and values exactly in the same order they were added by `XADD`. -`XADD` is the _only Redis command_ that can add data to a stream, but there are -other commands, such as `XDEL` and `XTRIM`, that are able to remove data from a -stream. +`XADD` is the *only Redis command* that can add data to a stream, but +there are other commands, such as `XDEL` and `XTRIM`, that are able to +remove data from a stream. ## Specifying a Stream ID as an argument A stream entry ID identifies a given entry inside a stream. The `XADD` command will auto-generate a unique ID for you if the ID argument -specified is the `*` character (asterisk ASCII character). However, while useful -only in very rare cases, it is possible to specify a well-formed ID, so that the -new entry will be added exactly with the specified ID. +specified is the `*` character (asterisk ASCII character). However, while +useful only in very rare cases, it is possible to specify a well-formed ID, so +that the new entry will be added exactly with the specified ID. 
IDs are specified by two numbers separated by a `-` character: 1526919030474-55 -Both quantities are 64-bit numbers. When an ID is auto-generated, the first part -is the Unix time in milliseconds of the Redis instance generating the ID. The -second part is just a sequence number and is used in order to distinguish IDs -generated in the same millisecond. - -IDs are guaranteed to be always incremental: If you compare the ID of the entry -just inserted it will be greater than any other past ID, so entries are totally -ordered inside a stream. In order to guarantee this property, if the current top -ID in the stream has a time greater than the current local time of the instance, -the top entry time will be used instead, and the sequence part of the ID -incremented. This may happen when, for instance, the local clock jumps backward, -or if after a failover the new master has a different absolute time. - -When a user specified an explicit ID to `XADD`, the minimum valid ID is `0-1`, -and the user _must_ specify an ID which is greater than any other ID currently -inside the stream, otherwise the command will fail and return an error. Usually +Both quantities are 64-bit numbers. When an ID is auto-generated, the +first part is the Unix time in milliseconds of the Redis instance generating +the ID. The second part is just a sequence number and is used in order to +distinguish IDs generated in the same millisecond. + +You can also specify an incomplete ID, that consists only of the milliseconds part, which is interpreted as a zero value for sequence part. +To have only the sequence part automatically generated, specify the milliseconds part followed by the `-` separator and the `*` character: + +``` +> XADD mystream 1526919030474-55 message "Hello," +"1526919030474-55" +> XADD mystream 1526919030474-* message " World!" 
+"1526919030474-56" +``` + +IDs are guaranteed to be always incremental: If you compare the ID of the +entry just inserted it will be greater than any other past ID, so entries +are totally ordered inside a stream. In order to guarantee this property, +if the current top ID in the stream has a time greater than the current +local time of the instance, the top entry time will be used instead, and +the sequence part of the ID incremented. This may happen when, for instance, +the local clock jumps backward, or if after a failover the new master has +a different absolute time. + +When a user specified an explicit ID to `XADD`, the minimum valid ID is +`0-1`, and the user *must* specify an ID which is greater than any other +ID currently inside the stream, otherwise the command will fail and return an error. Usually resorting to specific IDs is useful only if you have another system generating -unique IDs (for instance an SQL table) and you really want the Redis stream IDs -to match the one of this other system. +unique IDs (for instance an SQL table) and you really want the Redis stream +IDs to match the one of this other system. ## Capped streams -`XADD` incorporates the same semantics as the `XTRIM` command - refer to its -documentation page for more information. This allows adding new entries and -keeping the stream's size in check with a single call to `XADD`, effectively -capping the stream with an arbitrary threshold. Although exact trimming is -possible and is the default, due to the internal representation of streams it is -more efficient to add an entry and trim stream with `XADD` using **almost -exact** trimming (the `~` argument). +`XADD` incorporates the same semantics as the `XTRIM` command - refer to its documentation page for more information. +This allows adding new entries and keeping the stream's size in check with a single call to `XADD`, effectively capping the stream with an arbitrary threshold. 
+Although exact trimming is possible and is the default, due to the internal representation of streams it is more efficient to add an entry and trim stream with `XADD` using **almost exact** trimming (the `~` argument).

For example, calling `XADD` in the following form:

    XADD mystream MAXLEN ~ 1000 * ... entry fields here ...
-
-Will add a new entry but will also evict old entries so that the stream will
-contain only 1000 entries, or at most a few tens more.
+
+Will add a new entry but will also evict old entries so that the stream will contain only 1000 entries, or at most a few tens more.

## Additional information about streams

@@ -79,11 +83,6 @@ specified by the user during insertion.
The command returns a @nil-reply when used with the `NOMKSTREAM` option and the
key doesn't exist.

-@history
-
-- `>= 6.2`: Added the `NOMKSTREAM` option, `MINID` trimming strategy and the
-  `LIMIT` option.
-
@examples

```cli
diff --git a/iredis/data/commands/xautoclaim.md b/iredis/data/commands/xautoclaim.md
index fe02fc5..5ff44f2 100644
--- a/iredis/data/commands/xautoclaim.md
+++ b/iredis/data/commands/xautoclaim.md
@@ -1,57 +1,39 @@
-This command transfers ownership of pending stream entries that match the
-specified criteria. Conceptually, `XAUTOCLAIM` is equivalent to calling
-`XPENDING` and then `XCLAIM`, but provides a more straightforward way to deal
-with message delivery failures via `SCAN`-like semantics.
+This command transfers ownership of pending stream entries that match the specified criteria. Conceptually, `XAUTOCLAIM` is equivalent to calling `XPENDING` and then `XCLAIM`,
+but provides a more straightforward way to deal with message delivery failures via `SCAN`-like semantics.

-Like `XCLAIM`, the command operates on the stream entries at `<key>` and in the
-context of the provided `<group>`. It transfers ownership to `<consumer>` of
-messages pending for more than `<min-idle-time>` milliseconds and having an
-equal or greater ID than `<start>`.
+Like `XCLAIM`, the command operates on the stream entries at `<key>` and in the context of the provided `<group>`. +It transfers ownership to `<consumer>` of messages pending for more than `<min-idle-time>` milliseconds and having an equal or greater ID than `<start>`. -The optional `<count>` argument, which defaults to 100, is the upper limit of -the number of entries that the command attempts to claim. Internally, the -command begins scanning the consumer group's Pending Entries List (PEL) from -`<start>` and filters out entries having an idle time less than or equal to -`<min-idle-time>`. The maximum number of pending entries that the command scans -is the product of multiplying `<count>`'s value by 10 (hard-coded). It is -possible, therefore, that the number of entries claimed will be less than the -specified value. +The optional `<count>` argument, which defaults to 100, is the upper limit of the number of entries that the command attempts to claim. +Internally, the command begins scanning the consumer group's Pending Entries List (PEL) from `<start>` and filters out entries having an idle time less than or equal to `<min-idle-time>`. +The maximum number of pending entries that the command scans is the product of multiplying `<count>`'s value by 10 (hard-coded). +It is possible, therefore, that the number of entries claimed will be less than the specified value. -The optional `JUSTID` argument changes the reply to return just an array of IDs -of messages successfully claimed, without returning the actual message. Using -this option means the retry counter is not incremented. +The optional `JUSTID` argument changes the reply to return just an array of IDs of messages successfully claimed, without returning the actual message. +Using this option means the retry counter is not incremented. -The command returns the claimed entries as an array. It also returns a stream ID -intended for cursor-like use as the `<start>` argument for its subsequent call. 
-When there are no remaining PEL entries, the command returns the special `0-0` -ID to signal completion. However, note that you may want to continue calling -`XAUTOCLAIM` even after the scan is complete with the `0-0` as `<start>` ID, -because enough time passed, so older pending entries may now be eligible for -claiming. +The command returns the claimed entries as an array. It also returns a stream ID intended for cursor-like use as the `<start>` argument for its subsequent call. +When there are no remaining PEL entries, the command returns the special `0-0` ID to signal completion. +However, note that you may want to continue calling `XAUTOCLAIM` even after the scan is complete with the `0-0` as `<start>` ID, because enough time passed, so older pending entries may now be eligible for claiming. -Note that only messages that are idle longer than `<min-idle-time>` are claimed, -and claiming a message resets its idle time. This ensures that only a single -consumer can successfully claim a given pending message at a specific instant of -time and trivially reduces the probability of processing the same message -multiple times. +Note that only messages that are idle longer than `<min-idle-time>` are claimed, and claiming a message resets its idle time. +This ensures that only a single consumer can successfully claim a given pending message at a specific instant of time and trivially reduces the probability of processing the same message multiple times. -Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted -deliveries count for that message, unless the `JUSTID` option has been specified -(which only delivers the message ID, not the message itself). Messages that -cannot be processed for some reason - for example, because consumers -systematically crash when processing them - will exhibit high attempted delivery -counts that can be detected by monitoring. 
+While iterating the PEL, if `XAUTOCLAIM` stumbles upon a message which doesn't exist in the stream anymore (either trimmed or deleted by `XDEL`) it does not claim it, and deletes it from the PEL in which it was found. This feature was introduced in Redis 7.0. +These message IDs are returned to the caller as a part of `XAUTOCLAIM`s reply. + +Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted deliveries count for that message, unless the `JUSTID` option has been specified (which only delivers the message ID, not the message itself). +Messages that cannot be processed for some reason - for example, because consumers systematically crash when processing them - will exhibit high attempted delivery counts that can be detected by monitoring. @return @array-reply, specifically: -An array with two elements: +An array with three elements: -1. The first element is a stream ID to be used as the `<start>` argument for the - next call to `XAUTOCLAIM` -2. The second element is an array containing all the successfully claimed - messages in the same format as `XRANGE`. +1. A stream ID to be used as the `<start>` argument for the next call to `XAUTOCLAIM`. +2. An array containing all the successfully claimed messages in the same format as `XRANGE`. +3. An array containing message IDs that no longer exist in the stream, and were deleted from the PEL in which they were found. @examples @@ -61,10 +43,10 @@ An array with two elements: 2) 1) 1) "1609338752495-0" 2) 1) "field" 2) "value" +3) (empty array) ``` -In the above example, we attempt to claim up to 25 entries that are pending and -idle (not having been acknowledged or claimed) for at least an hour, starting at -the stream's beginning. The consumer "Alice" from the "mygroup" group acquires -ownership of these messages. Note that the stream ID returned in the example is -`0-0`, indicating that the entire stream was scanned. 
+In the above example, we attempt to claim up to 25 entries that are pending and idle (not having been acknowledged or claimed) for at least an hour, starting at the stream's beginning. +The consumer "Alice" from the "mygroup" group acquires ownership of these messages. +Note that the stream ID returned in the example is `0-0`, indicating that the entire stream was scanned. +We can also see that `XAUTOCLAIM` did not stumble upon any deleted messages (the third reply element is an empty array). diff --git a/iredis/data/commands/xclaim.md b/iredis/data/commands/xclaim.md index 7b611d2..d3a17dc 100644 --- a/iredis/data/commands/xclaim.md +++ b/iredis/data/commands/xclaim.md @@ -1,73 +1,47 @@ -In the context of a stream consumer group, this command changes the ownership of -a pending message, so that the new owner is the consumer specified as the +In the context of a stream consumer group, this command changes the ownership +of a pending message, so that the new owner is the consumer specified as the command argument. Normally this is what happens: 1. There is a stream with an associated consumer group. -2. Some consumer A reads a message via `XREADGROUP` from a stream, in the - context of that consumer group. -3. As a side effect a pending message entry is created in the Pending Entries - List (PEL) of the consumer group: it means the message was delivered to a - given consumer, but it was not yet acknowledged via `XACK`. +2. Some consumer A reads a message via `XREADGROUP` from a stream, in the context of that consumer group. +3. As a side effect a pending message entry is created in the Pending Entries List (PEL) of the consumer group: it means the message was delivered to a given consumer, but it was not yet acknowledged via `XACK`. 4. Then suddenly that consumer fails forever. -5. Other consumers may inspect the list of pending messages, that are stale for - quite some time, using the `XPENDING` command. 
In order to continue - processing such messages, they use `XCLAIM` to acquire the ownership of the - message and continue. As of Redis 6.2, consumers can use the `XAUTOCLAIM` - command to automatically scan and claim stale pending messages. - -This dynamic is clearly explained in the -[Stream intro documentation](/topics/streams-intro). - -Note that the message is claimed only if its idle time is greater the minimum -idle time we specify when calling `XCLAIM`. Because as a side effect `XCLAIM` -will also reset the idle time (since this is a new attempt at processing the -message), two consumers trying to claim a message at the same time will never -both succeed: only one will successfully claim the message. This avoids that we -process a given message multiple times in a trivial way (yet multiple processing -is possible and unavoidable in the general case). - -Moreover, as a side effect, `XCLAIM` will increment the count of attempted -deliveries of the message unless the `JUSTID` option has been specified (which -only delivers the message ID, not the message itself). In this way messages that -cannot be processed for some reason, for instance because the consumers crash -attempting to process them, will start to have a larger counter and can be -detected inside the system. +5. Other consumers may inspect the list of pending messages, that are stale for quite some time, using the `XPENDING` command. In order to continue processing such messages, they use `XCLAIM` to acquire the ownership of the message and continue. Consumers can also use the `XAUTOCLAIM` command to automatically scan and claim stale pending messages. + +This dynamic is clearly explained in the [Stream intro documentation](/topics/streams-intro). + +Note that the message is claimed only if its idle time is greater the minimum idle time we specify when calling `XCLAIM`. 
Because as a side effect `XCLAIM` will also reset the idle time (since this is a new attempt at processing the message), two consumers trying to claim a message at the same time will never both succeed: only one will successfully claim the message. This avoids that we process a given message multiple times in a trivial way (yet multiple processing is possible and unavoidable in the general case). + +Moreover, as a side effect, `XCLAIM` will increment the count of attempted deliveries of the message unless the `JUSTID` option has been specified (which only delivers the message ID, not the message itself). In this way messages that cannot be processed for some reason, for instance because the consumers crash attempting to process them, will start to have a larger counter and can be detected inside the system. + +`XCLAIM` will not claim a message in the following cases: + +1. The message doesn't exist in the group PEL (i.e. it was never read by any consumer) +2. The message exists in the group PEL but not in the stream itself (i.e. the message was read but never acknowledged, and then was deleted from the stream, either by trimming or by `XDEL`) + +In both cases the reply will not contain a corresponding entry to that message (i.e. the length of the reply array may be smaller than the number of IDs provided to `XCLAIM`). +In the latter case, the message will also be deleted from the PEL in which it was found. This feature was introduced in Redis 7.0. ## Command options The command has multiple options, however most are mainly for internal use in -order to transfer the effects of `XCLAIM` or other commands to the AOF file and -to propagate the same effects to the slaves, and are unlikely to be useful to -normal users: - -1. `IDLE <ms>`: Set the idle time (last time it was delivered) of the message. - If IDLE is not specified, an IDLE of 0 is assumed, that is, the time count is - reset because the message has now a new owner trying to process it. -2. 
`TIME <ms-unix-time>`: This is the same as IDLE but instead of a relative - amount of milliseconds, it sets the idle time to a specific Unix time (in - milliseconds). This is useful in order to rewrite the AOF file generating - `XCLAIM` commands. -3. `RETRYCOUNT <count>`: Set the retry counter to the specified value. This - counter is incremented every time a message is delivered again. Normally - `XCLAIM` does not alter this counter, which is just served to clients when - the XPENDING command is called: this way clients can detect anomalies, like - messages that are never processed for some reason after a big number of - delivery attempts. -4. `FORCE`: Creates the pending message entry in the PEL even if certain - specified IDs are not already in the PEL assigned to a different client. - However the message must be exist in the stream, otherwise the IDs of non - existing messages are ignored. -5. `JUSTID`: Return just an array of IDs of messages successfully claimed, - without returning the actual message. Using this option means the retry - counter is not incremented. +order to transfer the effects of `XCLAIM` or other commands to the AOF file +and to propagate the same effects to the replicas, and are unlikely to be +useful to normal users: + +1. `IDLE <ms>`: Set the idle time (last time it was delivered) of the message. If IDLE is not specified, an IDLE of 0 is assumed, that is, the time count is reset because the message has now a new owner trying to process it. +2. `TIME <ms-unix-time>`: This is the same as IDLE but instead of a relative amount of milliseconds, it sets the idle time to a specific Unix time (in milliseconds). This is useful in order to rewrite the AOF file generating `XCLAIM` commands. +3. `RETRYCOUNT <count>`: Set the retry counter to the specified value. This counter is incremented every time a message is delivered again. 
Normally `XCLAIM` does not alter this counter, which is just served to clients when the XPENDING command is called: this way clients can detect anomalies, like messages that are never processed for some reason after a big number of delivery attempts. +4. `FORCE`: Creates the pending message entry in the PEL even if certain specified IDs are not already in the PEL assigned to a different client. However the message must exist in the stream, otherwise the IDs of non existing messages are ignored. +5. `JUSTID`: Return just an array of IDs of messages successfully claimed, without returning the actual message. Using this option means the retry counter is not incremented. @return @array-reply, specifically: -The command returns all the messages successfully claimed, in the same format as -`XRANGE`. However if the `JUSTID` option was specified, only the message IDs are -reported, without including the actual message. +The command returns all the messages successfully claimed, in the same format +as `XRANGE`. However if the `JUSTID` option was specified, only the message +IDs are reported, without including the actual message. @examples @@ -78,7 +52,4 @@ reported, without including the actual message. 2) "orange" ``` -In the above example we claim the message with ID `1526569498055-0`, only if the -message is idle for at least one hour without the original consumer or some -other consumer making progresses (acknowledging or claiming it), and assigns the -ownership to the consumer `Alice`. +In the above example we claim the message with ID `1526569498055-0`, only if the message is idle for at least one hour without the original consumer or some other consumer making progress (acknowledging or claiming it), and assigns the ownership to the consumer `Alice`. 
diff --git a/iredis/data/commands/xdel.md b/iredis/data/commands/xdel.md index 3f507a8..3ee4a3d 100644 --- a/iredis/data/commands/xdel.md +++ b/iredis/data/commands/xdel.md @@ -1,30 +1,30 @@ Removes the specified entries from a stream, and returns the number of entries -deleted, that may be different from the number of IDs passed to the command in -case certain IDs do not exist. +deleted. This number may be less than the number of IDs passed to the command in +the case where some of the specified IDs do not exist in the stream. Normally you may think at a Redis stream as an append-only data structure, -however Redis streams are represented in memory, so we are able to also delete -entries. This may be useful, for instance, in order to comply with certain -privacy policies. +however Redis streams are represented in memory, so we are also able to +delete entries. This may be useful, for instance, in order to comply with +certain privacy policies. -# Understanding the low level details of entries deletion +## Understanding the low level details of entries deletion -Redis streams are represented in a way that makes them memory efficient: a radix -tree is used in order to index macro-nodes that pack linearly tens of stream -entries. Normally what happens when you delete an entry from a stream is that -the entry is not _really_ evicted, it just gets marked as deleted. +Redis streams are represented in a way that makes them memory efficient: +a radix tree is used in order to index macro-nodes that pack linearly tens +of stream entries. Normally what happens when you delete an entry from a stream +is that the entry is not *really* evicted, it just gets marked as deleted. Eventually if all the entries in a macro-node are marked as deleted, the whole -node is destroyed and the memory reclaimed. 
This means that if you delete a -large amount of entries from a stream, for instance more than 50% of the entries -appended to the stream, the memory usage per entry may increment, since what -happens is that the stream will start to be fragmented. However the stream -performances will remain the same. +node is destroyed and the memory reclaimed. This means that if you delete +a large amount of entries from a stream, for instance more than 50% of the +entries appended to the stream, the memory usage per entry may increment, since +what happens is that the stream will become fragmented. However the stream +performance will remain the same. In future versions of Redis it is possible that we'll trigger a node garbage -collection in case a given macro-node reaches a given amount of deleted entries. -Currently with the usage we anticipate for this data structure, it is not a good -idea to add such complexity. +collection in case a given macro-node reaches a given amount of deleted +entries. Currently with the usage we anticipate for this data structure, it is +not a good idea to add such complexity. @return diff --git a/iredis/data/commands/xgroup-create.md b/iredis/data/commands/xgroup-create.md new file mode 100644 index 0000000..f0f1606 --- /dev/null +++ b/iredis/data/commands/xgroup-create.md @@ -0,0 +1,23 @@ +This command creates a new consumer group uniquely identified by `<groupname>` for the stream stored at `<key>`. + +Every group has a unique name in a given stream. When a consumer group with the same name already exists, the command returns a `-BUSYGROUP` error. + +The command's `<id>` argument specifies the last delivered entry in the stream from the new group's perspective. +The special ID `$` means the ID of the last entry in the stream, but you can provide any valid ID instead. 
+For example, if you want the group's consumers to fetch the entire stream from the beginning, use zero as the starting ID for the consumer group: + + XGROUP CREATE mystream mygroup 0 + +By default, the `XGROUP CREATE` command insists that the target stream exists and returns an error when it doesn't. +However, you can use the optional `MKSTREAM` subcommand as the last argument after the `<id>` to automatically create the stream (with length of 0) if it doesn't exist: + + XGROUP CREATE mystream mygroup $ MKSTREAM + +The optional `entries_read` named argument can be specified to enable consumer group lag tracking for an arbitrary ID. +An arbitrary ID is any ID that isn't the ID of the stream's first entry, its last entry or the zero ("0-0") ID. +This can be useful if you know exactly how many entries are between the arbitrary ID (excluding it) and the stream's last entry. +In such cases, the `entries_read` can be set to the stream's `entries_added` subtracted with the number of entries. + +@return + +@simple-string-reply: `OK` on success. diff --git a/iredis/data/commands/xgroup-createconsumer.md b/iredis/data/commands/xgroup-createconsumer.md new file mode 100644 index 0000000..17274a5 --- /dev/null +++ b/iredis/data/commands/xgroup-createconsumer.md @@ -0,0 +1,7 @@ +Create a consumer named `<consumername>` in the consumer group `<groupname>` of the stream that's stored at `<key>`. + +Consumers are also created automatically whenever an operation, such as `XREADGROUP`, references a consumer that doesn't exist. + +@return + +@integer-reply: the number of created consumers (0 or 1)
\ No newline at end of file diff --git a/iredis/data/commands/xgroup-delconsumer.md b/iredis/data/commands/xgroup-delconsumer.md new file mode 100644 index 0000000..9e73da8 --- /dev/null +++ b/iredis/data/commands/xgroup-delconsumer.md @@ -0,0 +1,10 @@ +The `XGROUP DELCONSUMER` command deletes a consumer from the consumer group. + +Sometimes it may be useful to remove old consumers since they are no longer used. + +Note, however, that any pending messages that the consumer had will become unclaimable after it was deleted. +It is strongly recommended, therefore, that any pending messages are claimed or acknowledged prior to deleting the consumer from the group. + +@return + +@integer-reply: the number of pending messages that the consumer had before it was deleted diff --git a/iredis/data/commands/xgroup-destroy.md b/iredis/data/commands/xgroup-destroy.md new file mode 100644 index 0000000..448468b --- /dev/null +++ b/iredis/data/commands/xgroup-destroy.md @@ -0,0 +1,7 @@ +The `XGROUP DESTROY` command completely destroys a consumer group. + +The consumer group will be destroyed even if there are active consumers, and pending messages, so make sure to call this command only when really needed. + +@return + +@integer-reply: the number of destroyed consumer groups (0 or 1)
\ No newline at end of file diff --git a/iredis/data/commands/xgroup-help.md b/iredis/data/commands/xgroup-help.md new file mode 100644 index 0000000..1eb1a7b --- /dev/null +++ b/iredis/data/commands/xgroup-help.md @@ -0,0 +1,5 @@ +The `XGROUP HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/xgroup-setid.md b/iredis/data/commands/xgroup-setid.md new file mode 100644 index 0000000..0808404 --- /dev/null +++ b/iredis/data/commands/xgroup-setid.md @@ -0,0 +1,16 @@ +Set the **last delivered ID** for a consumer group. + +Normally, a consumer group's last delivered ID is set when the group is created with `XGROUP CREATE`. +The `XGROUP SETID` command allows modifying the group's last delivered ID, without having to delete and recreate the group. +For instance if you want the consumers in a consumer group to re-process all the messages in a stream, you may want to set its next ID to 0: + + XGROUP SETID mystream mygroup 0 + +The optional `entries_read` argument can be specified to enable consumer group lag tracking for an arbitrary ID. +An arbitrary ID is any ID that isn't the ID of the stream's first entry, its last entry or the zero ("0-0") ID. +This can be useful you know exactly how many entries are between the arbitrary ID (excluding it) and the stream's last entry. +In such cases, the `entries_read` can be set to the stream's `entries_added` subtracted with the number of entries. + +@return + +@simple-string-reply: `OK` on success. diff --git a/iredis/data/commands/xgroup.md b/iredis/data/commands/xgroup.md index 80948a6..e7b517a 100644 --- a/iredis/data/commands/xgroup.md +++ b/iredis/data/commands/xgroup.md @@ -1,81 +1,3 @@ -This command is used in order to manage the consumer groups associated with a -stream data structure. Using `XGROUP` you can: +This is a container command for stream consumer group management commands. 
-- Create a new consumer group associated with a stream. -- Destroy a consumer group. -- Remove a specific consumer from a consumer group. -- Set the consumer group _last delivered ID_ to something else. - -To create a new consumer group, use the following form: - - XGROUP CREATE mystream consumer-group-name $ - -The last argument is the ID of the last item in the stream to consider already -delivered. In the above case we used the special ID '$' (that means: the ID of -the last item in the stream). In this case the consumers fetching data from that -consumer group will only see new elements arriving in the stream. - -If instead you want consumers to fetch the whole stream history, use zero as the -starting ID for the consumer group: - - XGROUP CREATE mystream consumer-group-name 0 - -Of course it is also possible to use any other valid ID. If the specified -consumer group already exists, the command returns a `-BUSYGROUP` error. -Otherwise, the operation is performed and a @simple-string-reply `OK` is -returned. There are no hard limits to the number of consumer groups you can -associate with a given stream. - -If the specified stream doesn't exist when creating a group, an error will be -returned. You can use the optional `MKSTREAM` subcommand as the last argument -after the `ID` to automatically create the stream, if it doesn't exist. Note -that if the stream is created in this way it will have a length of 0: - - XGROUP CREATE mystream consumer-group-name $ MKSTREAM - -A consumer group can be destroyed completely by using the following form: - - XGROUP DESTROY mystream consumer-group-name - -The consumer group will be destroyed even if there are active consumers and -pending messages, so make sure to call this command only when really needed. -This form returns an @integer-reply with the number of destroyed consumer groups -(0 or 1). - -Consumers in a consumer group are auto-created every time a new consumer name is -mentioned by some command. 
They can also be explicitly created by using the -following form: - - XGROUP CREATECONSUMER mystream consumer-group-name myconsumer123 - -This form returns an @integer-reply with the number of created consumers (0 or -1). - -To just remove a given consumer from a consumer group, the following form is -used: - - XGROUP DELCONSUMER mystream consumer-group-name myconsumer123 - -Sometimes it may be useful to remove old consumers since they are no longer -used. This form returns an @integer-reply with the number of pending messages -that the consumer had before it was deleted. - -Finally it possible to set the next message to deliver using the `SETID` -subcommand. Normally the next ID is set when the consumer is created, as the -last argument of `XGROUP CREATE`. However using this form the next ID can be -modified later without deleting and creating the consumer group again. For -instance if you want the consumers in a consumer group to re-process all the -messages in a stream, you may want to set its next ID to 0: - - XGROUP SETID mystream consumer-group-name 0 - -This form returns a @simple-string-reply `OK` or an error. - -Finally to get some help if you don't remember the syntax, use the HELP -subcommand: - - XGROUP HELP - -@history - -- `>= 6.2.0`: Supports the `CREATECONSUMER` subcommand. +To see the list of available commands you can call `XGROUP HELP`. diff --git a/iredis/data/commands/xinfo-consumers.md b/iredis/data/commands/xinfo-consumers.md new file mode 100644 index 0000000..f65366d --- /dev/null +++ b/iredis/data/commands/xinfo-consumers.md @@ -0,0 +1,29 @@ +This command returns the list of consumers that belong to the `<groupname>` consumer group of the stream stored at `<key>`. 
+ +The following information is provided for each consumer in the group: + +* **name**: the consumer's name +* **pending**: the number of pending messages for the client, which are messages that were delivered but are yet to be acknowledged +* **idle**: the number of milliseconds that have passed since the consumer last interacted with the server + +@reply + +@array-reply: a list of consumers. + +@examples + +``` +> XINFO CONSUMERS mystream mygroup +1) 1) name + 2) "Alice" + 3) pending + 4) (integer) 1 + 5) idle + 6) (integer) 9104628 +2) 1) name + 2) "Bob" + 3) pending + 4) (integer) 1 + 5) idle + 6) (integer) 83841983 +``` diff --git a/iredis/data/commands/xinfo-groups.md b/iredis/data/commands/xinfo-groups.md new file mode 100644 index 0000000..03eafec --- /dev/null +++ b/iredis/data/commands/xinfo-groups.md @@ -0,0 +1,74 @@ +This command returns the list of all consumer groups of the stream stored at `<key>`. + +By default, only the following information is provided for each of the groups: + +* **name**: the consumer group's name +* **consumers**: the number of consumers in the group +* **pending**: the length of the group's pending entries list (PEL), which are messages that were delivered but are yet to be acknowledged +* **last-delivered-id**: the ID of the last entry delivered to the group's consumers +* **entries-read**: the logical "read counter" of the last entry delivered to the group's consumers +* **lag**: the number of entries in the stream that are still waiting to be delivered to the group's consumers, or a NULL when that number can't be determined. + +### Consumer group lag + +The lag of a given consumer group is the number of entries in the range between the group's `entries_read` and the stream's `entries_added`. +Put differently, it is the number of entries that are yet to be delivered to the group's consumers. + +The values and trends of this metric are helpful in making scaling decisions about the consumer group. 
+You can address high lag values by adding more consumers to the group, whereas low values may indicate that you can remove consumers from the group to scale it down. + +Redis reports the lag of a consumer group by keeping two counters: the number of all entries added to the stream and the number of logical reads made by the consumer group. +The lag is the difference between these two. + +The stream's counter (the `entries_added` field of the `XINFO STREAM` command) is incremented by one with every `XADD` and counts all of the entries added to the stream during its lifetime. + +The consumer group's counter, `entries_read`, is the logical counter of entries that the group had read. +It is important to note that this counter is only a heuristic rather than an accurate counter, and therefore the use of the term "logical". +The counter attempts to reflect the number of entries that the group **should have read** to get to its current `last-delivered-id`. +The `entries_read` counter is accurate only in a perfect world, where a consumer group starts at the stream's first entry and processes all of its entries (i.e., no entries deleted before processing). + +There are two special cases in which this mechanism is unable to report the lag: + +1. A consumer group is created or set with an arbitrary last delivered ID (the `XGROUP CREATE` and `XGROUP SETID` commands, respectively). + An arbitrary ID is any ID that isn't the ID of the stream's first entry, its last entry or the zero ("0-0") ID. +2. One or more entries between the group's `last-delivered-id` and the stream's `last-generated-id` were deleted (with `XDEL` or a trimming operation). + +In both cases, the group's read counter is considered invalid, and the returned value is set to NULL to signal that the lag isn't currently available. + +However, the lag is only temporarily unavailable. +It is restored automatically during regular operation as consumers keep processing messages. 
+Once the consumer group delivers the last message in the stream to its members, it will be set with the correct logical read counter, and tracking its lag can be resumed. + +@reply + +@array-reply: a list of consumer groups. + +@examples + +``` +> XINFO GROUPS mystream +1) 1) "name" + 2) "mygroup" + 3) "consumers" + 4) (integer) 2 + 5) "pending" + 6) (integer) 2 + 7) "last-delivered-id" + 8) "1638126030001-0" + 9) "entries-read" + 10) (integer) 2 + 11) "lag" + 12) (integer) 0 +2) 1) "name" + 2) "some-other-group" + 3) "consumers" + 4) (integer) 1 + 5) "pending" + 6) (integer) 0 + 7) "last-delivered-id" + 8) "1638126028070-0" + 9) "entries-read" + 10) (integer) 1 + 11) "lag" + 12) (integer) 1 +``` diff --git a/iredis/data/commands/xinfo-help.md b/iredis/data/commands/xinfo-help.md new file mode 100644 index 0000000..293892f --- /dev/null +++ b/iredis/data/commands/xinfo-help.md @@ -0,0 +1,5 @@ +The `XINFO HELP` command returns a helpful text describing the different subcommands. + +@return + +@array-reply: a list of subcommands and their descriptions diff --git a/iredis/data/commands/xinfo-stream.md b/iredis/data/commands/xinfo-stream.md new file mode 100644 index 0000000..f697608 --- /dev/null +++ b/iredis/data/commands/xinfo-stream.md @@ -0,0 +1,118 @@ +This command returns information about the stream stored at `<key>`. 
+ +The informative details provided by this command are: + +* **length**: the number of entries in the stream (see `XLEN`) +* **radix-tree-keys**: the number of keys in the underlying radix data structure +* **radix-tree-nodes**: the number of nodes in the underlying radix data structure +* **groups**: the number of consumer groups defined for the stream +* **last-generated-id**: the ID of the last entry that was added to the stream +* **max-deleted-entry-id**: the maximal entry ID that was deleted from the stream +* **entries-added**: the count of all entries added to the stream during its lifetime +* **first-entry**: the ID and field-value tuples of the first entry in the stream +* **last-entry**: the ID and field-value tuples of the last entry in the stream + +The optional `FULL` modifier provides a more verbose reply. +When provided, the `FULL` reply includes an **entries** array that consists of the stream entries (ID and field-value tuples) in ascending order. +Furthermore, **groups** is also an array, and for each of the consumer groups it consists of the information reported by `XINFO GROUPS` and `XINFO CONSUMERS`. + +The `COUNT` option can be used to limit the number of stream and PEL entries that are returned (The first `<count>` entries are returned). +The default `COUNT` is 10 and a `COUNT` of 0 means that all entries will be returned (execution time may be long if the stream has a lot of entries). 
+ +@return + +@array-reply: a list of informational bits + +@examples + +Default reply: + +``` +> XINFO STREAM mystream + 1) "length" + 2) (integer) 2 + 3) "radix-tree-keys" + 4) (integer) 1 + 5) "radix-tree-nodes" + 6) (integer) 2 + 7) "last-generated-id" + 8) "1638125141232-0" + 9) "max-deleted-entry-id" +10) "0-0" +11) "entries-added" +12) (integer) 2 +13) "groups" +14) (integer) 1 +15) "first-entry" +16) 1) "1638125133432-0" + 2) 1) "message" + 2) "apple" +17) "last-entry" +18) 1) "1638125141232-0" + 2) 1) "message" + 2) "banana" +``` + +Full reply: + +``` +> XADD mystream * foo bar +"1638125133432-0" +> XADD mystream * foo bar2 +"1638125141232-0" +> XGROUP CREATE mystream mygroup 0-0 +OK +> XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream > +1) 1) "mystream" + 2) 1) 1) "1638125133432-0" + 2) 1) "foo" + 2) "bar" +> XINFO STREAM mystream FULL + 1) "length" + 2) (integer) 2 + 3) "radix-tree-keys" + 4) (integer) 1 + 5) "radix-tree-nodes" + 6) (integer) 2 + 7) "last-generated-id" + 8) "1638125141232-0" + 9) "max-deleted-entry-id" +10) "0-0" +11) "entries-added" +12) (integer) 2 +13) "entries" +14) 1) 1) "1638125133432-0" + 2) 1) "foo" + 2) "bar" + 2) 1) "1638125141232-0" + 2) 1) "foo" + 2) "bar2" +15) "groups" +16) 1) 1) "name" + 2) "mygroup" + 3) "last-delivered-id" + 4) "1638125133432-0" + 5) "entries-read" + 6) (integer) 1 + 7) "lag" + 8) (integer) 1 + 9) "pel-count" + 10) (integer) 1 + 11) "pending" + 12) 1) 1) "1638125133432-0" + 2) "Alice" + 3) (integer) 1638125153423 + 4) (integer) 1 + 13) "consumers" + 14) 1) 1) "name" + 2) "Alice" + 3) "seen-time" + 4) (integer) 1638125153423 + 5) "pel-count" + 6) (integer) 1 + 7) "pending" + 8) 1) 1) "1638125133432-0" + 2) (integer) 1638125153423 + 3) (integer) 1 +> +``` diff --git a/iredis/data/commands/xinfo.md b/iredis/data/commands/xinfo.md index d8c226b..93fe9a2 100644 --- a/iredis/data/commands/xinfo.md +++ b/iredis/data/commands/xinfo.md @@ -1,183 +1,3 @@ -This is an introspection command used in order to 
retrieve different information -about the streams and associated consumer groups. Three forms are possible: +This is a container command for stream introspection commands. -- `XINFO STREAM <key>` - -In this form the command returns general information about the stream stored at -the specified key. - -``` -> XINFO STREAM mystream - 1) length - 2) (integer) 2 - 3) radix-tree-keys - 4) (integer) 1 - 5) radix-tree-nodes - 6) (integer) 2 - 7) groups - 8) (integer) 2 - 9) last-generated-id -10) 1538385846314-0 -11) first-entry -12) 1) 1538385820729-0 - 2) 1) "foo" - 2) "bar" -13) last-entry -14) 1) 1538385846314-0 - 2) 1) "field" - 2) "value" -``` - -In the above example you can see that the reported information are the number of -elements of the stream, details about the radix tree representing the stream -mostly useful for optimization and debugging tasks, the number of consumer -groups associated with the stream, the last generated ID that may not be the -same as the last entry ID in case some entry was deleted. Finally the full first -and last entry in the stream are shown, in order to give some sense about what -is the stream content. - -- `XINFO STREAM <key> FULL [COUNT <count>]` - -In this form the command returns the entire state of the stream, including -entries, groups, consumers and Pending Entries Lists (PELs). This form is -available since Redis 6.0. 
- -``` -> XADD mystream * foo bar -"1588152471065-0" -> XADD mystream * foo bar2 -"1588152473531-0" -> XGROUP CREATE mystream mygroup 0-0 -OK -> XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream > -1) 1) "mystream" - 2) 1) 1) "1588152471065-0" - 2) 1) "foo" - 2) "bar" -> XINFO STREAM mystream FULL - 1) "length" - 2) (integer) 2 - 3) "radix-tree-keys" - 4) (integer) 1 - 5) "radix-tree-nodes" - 6) (integer) 2 - 7) "last-generated-id" - 8) "1588152473531-0" - 9) "entries" -10) 1) 1) "1588152471065-0" - 2) 1) "foo" - 2) "bar" - 2) 1) "1588152473531-0" - 2) 1) "foo" - 2) "bar2" -11) "groups" -12) 1) 1) "name" - 2) "mygroup" - 3) "last-delivered-id" - 4) "1588152471065-0" - 5) "pel-count" - 6) (integer) 1 - 7) "pending" - 8) 1) 1) "1588152471065-0" - 2) "Alice" - 3) (integer) 1588152520299 - 4) (integer) 1 - 9) "consumers" - 10) 1) 1) "name" - 2) "Alice" - 3) "seen-time" - 4) (integer) 1588152520299 - 5) "pel-count" - 6) (integer) 1 - 7) "pending" - 8) 1) 1) "1588152471065-0" - 2) (integer) 1588152520299 - 3) (integer) 1 -``` - -The reported information contains all of the fields reported by the simple form -of `XINFO STREAM`, with some additional information: - -1. Stream entries are returned, including fields and values. -2. Groups, consumers and PELs are returned. - -The `COUNT` option is used to limit the amount of stream/PEL entries that are -returned (The first `<count>` entries are returned). 
The default `COUNT` is 10 -and a `COUNT` of 0 means that all entries will be returned (Execution time may -be long if the stream has a lot of entries) - -- `XINFO GROUPS <key>` - -In this form we just get as output all the consumer groups associated with the -stream: - -``` -> XINFO GROUPS mystream -1) 1) name - 2) "mygroup" - 3) consumers - 4) (integer) 2 - 5) pending - 6) (integer) 2 - 7) last-delivered-id - 8) "1588152489012-0" -2) 1) name - 2) "some-other-group" - 3) consumers - 4) (integer) 1 - 5) pending - 6) (integer) 0 - 7) last-delivered-id - 8) "1588152498034-0" -``` - -For each consumer group listed the command also shows the number of consumers -known in that group and the pending messages (delivered but not yet -acknowledged) in that group. - -- `XINFO CONSUMERS <key> <group>` - -Finally it is possible to get the list of every consumer in a specific consumer -group: - -``` -> XINFO CONSUMERS mystream mygroup -1) 1) name - 2) "Alice" - 3) pending - 4) (integer) 1 - 5) idle - 6) (integer) 9104628 -2) 1) name - 2) "Bob" - 3) pending - 4) (integer) 1 - 5) idle - 6) (integer) 83841983 -``` - -We can see the idle time in milliseconds (last field) together with the consumer -name and the number of pending messages for this specific consumer. - -**Note that you should not rely on the fields exact position**, nor on the -number of fields, new fields may be added in the future. So a well behaving -client should fetch the whole list, and report it to the user, for example, as a -dictionary data structure. Low level clients such as C clients where the items -will likely be reported back in a linear array should document that the order is -undefined. - -Finally it is possible to get help from the command, in case the user can't -remember the exact syntax, by using the `HELP` subcommand: - -``` -> XINFO HELP -1) XINFO <subcommand> arg arg ... arg. Subcommands are: -2) CONSUMERS <key> <groupname> -- Show consumer groups of group <groupname>. 
-3) GROUPS <key> -- Show the stream consumer groups. -4) STREAM <key> -- Show information about the stream. -5) HELP -``` - -@history - -- `>= 6.0.0`: Added the `FULL` option to `XINFO STREAM`. +To see the list of available commands you can call `XINFO HELP`. diff --git a/iredis/data/commands/xlen.md b/iredis/data/commands/xlen.md index 5506449..41c2010 100644 --- a/iredis/data/commands/xlen.md +++ b/iredis/data/commands/xlen.md @@ -1,11 +1,12 @@ Returns the number of entries inside a stream. If the specified key does not -exist the command returns zero, as if the stream was empty. However note that -unlike other Redis types, zero-length streams are possible, so you should call -`TYPE` or `EXISTS` in order to check if a key exists or not. +exist the command returns zero, as if the stream was empty. +However note that unlike other Redis types, zero-length streams are +possible, so you should call `TYPE` or `EXISTS` in order to check if +a key exists or not. Streams are not auto-deleted once they have no entries inside (for instance -after an `XDEL` call), because the stream may have consumer groups associated -with it. +after an `XDEL` call), because the stream may have consumer groups +associated with it. @return diff --git a/iredis/data/commands/xpending.md b/iredis/data/commands/xpending.md index 7eb48e0..48840aa 100644 --- a/iredis/data/commands/xpending.md +++ b/iredis/data/commands/xpending.md @@ -1,28 +1,30 @@ -Fetching data from a stream via a consumer group, and not acknowledging such -data, has the effect of creating _pending entries_. This is well explained in -the `XREADGROUP` command, and even better in our -[introduction to Redis Streams](/topics/streams-intro). The `XACK` command will -immediately remove the pending entry from the Pending Entries List (PEL) since -once a message is successfully processed, there is no longer need for the -consumer group to track it and to remember the current owner of the message. 
- -The `XPENDING` command is the interface to inspect the list of pending messages, -and is as thus a very important command in order to observe and understand what -is happening with a streams consumer groups: what clients are active, what -messages are pending to be consumed, or to see if there are idle messages. -Moreover this command, together with `XCLAIM` is used in order to implement -recovering of consumers that are failing for a long time, and as a result -certain messages are not processed: a different consumer can claim the message -and continue. This is better explained in the -[streams intro](/topics/streams-intro) and in the `XCLAIM` command page, and is -not covered here. +Fetching data from a stream via a consumer group, and not acknowledging +such data, has the effect of creating *pending entries*. This is +well explained in the `XREADGROUP` command, and even better in our +[introduction to Redis Streams](/topics/streams-intro). The `XACK` command +will immediately remove the pending entry from the Pending Entries List (PEL) +since once a message is successfully processed, there is no longer need +for the consumer group to track it and to remember the current owner +of the message. + +The `XPENDING` command is the interface to inspect the list of pending +messages, and is as thus a very important command in order to observe +and understand what is happening with a streams consumer groups: what +clients are active, what messages are pending to be consumed, or to see +if there are idle messages. Moreover this command, together with `XCLAIM` +is used in order to implement recovering of consumers that are failing +for a long time, and as a result certain messages are not processed: a +different consumer can claim the message and continue. This is better +explained in the [streams intro](/topics/streams-intro) and in the +`XCLAIM` command page, and is not covered here. 
## Summary form of XPENDING -When `XPENDING` is called with just a key name and a consumer group name, it -just outputs a summary about the pending messages in a given consumer group. In -the following example, we create a consumer group and immediately create a -pending message by reading from the group with `XREADGROUP`. +When `XPENDING` is called with just a key name and a consumer group +name, it just outputs a summary about the pending messages in a given +consumer group. In the following example, we create a consumer group and +immediately create a pending message by reading from the group with +`XREADGROUP`. ``` > XGROUP CREATE mystream group55 0-0 @@ -39,10 +41,10 @@ OK 6) "7782813" ``` -We expect the pending entries list for the consumer group `group55` to have a -message right now: consumer named `consumer-123` fetched the message without -acknowledging its processing. The simple `XPENDING` form will give us this -information: +We expect the pending entries list for the consumer group `group55` to +have a message right now: consumer named `consumer-123` fetched the +message without acknowledging its processing. The simple `XPENDING` +form will give us this information: ``` > XPENDING mystream group55 @@ -55,16 +57,16 @@ information: In this form, the command outputs the total number of pending messages for this consumer group, which is one, followed by the smallest and greatest ID among the -pending messages, and then list every consumer in the consumer group with at -least one pending message, and the number of pending messages it has. +pending messages, and then list every consumer in the consumer group with +at least one pending message, and the number of pending messages it has. ## Extended form of XPENDING The summary provides a good overview, but sometimes we are interested in the details. 
In order to see all the pending messages with more associated information we need to also pass a range of IDs, in a similar way we do it with -`XRANGE`, and a non optional _count_ argument, to limit the number of messages -returned per call: +`XRANGE`, and a non optional *count* argument, to limit the number +of messages returned per call: ``` > XPENDING mystream group55 - + 10 @@ -74,49 +76,48 @@ returned per call: 4) (integer) 1 ``` -In the extended form we no longer see the summary information, instead there is -detailed information for each message in the pending entries list. For each -message four attributes are returned: +In the extended form we no longer see the summary information, instead there +is detailed information for each message in the pending entries list. For +each message four attributes are returned: 1. The ID of the message. -2. The name of the consumer that fetched the message and has still to - acknowledge it. We call it the current _owner_ of the message. -3. The number of milliseconds that elapsed since the last time this message was - delivered to this consumer. +2. The name of the consumer that fetched the message and has still to acknowledge it. We call it the current *owner* of the message. +3. The number of milliseconds that elapsed since the last time this message was delivered to this consumer. 4. The number of times this message was delivered. The deliveries counter, that is the fourth element in the array, is incremented -when some other consumer _claims_ the message with `XCLAIM`, or when the message -is delivered again via `XREADGROUP`, when accessing the history of a consumer in -a consumer group (see the `XREADGROUP` page for more info). +when some other consumer *claims* the message with `XCLAIM`, or when the +message is delivered again via `XREADGROUP`, when accessing the history +of a consumer in a consumer group (see the `XREADGROUP` page for more info). 
-It is possible to pass an additional argument to the command, in order to see -the messages having a specific owner: +It is possible to pass an additional argument to the command, in order +to see the messages having a specific owner: ``` > XPENDING mystream group55 - + 10 consumer-123 ``` But in the above case the output would be the same, since we have pending -messages only for a single consumer. However what is important to keep in mind -is that this operation, filtering by a specific consumer, is not inefficient -even when there are many pending messages from many consumers: we have a pending -entries list data structure both globally, and for every consumer, so we can -very efficiently show just messages pending for a single consumer. +messages only for a single consumer. However what is important to keep in +mind is that this operation, filtering by a specific consumer, is not +inefficient even when there are many pending messages from many consumers: +we have a pending entries list data structure both globally, and for +every consumer, so we can very efficiently show just messages pending for +a single consumer. ## Idle time filter -Since version 6.2 it is possible to filter entries by their idle-time, given in -milliseconds (useful for `XCLAIM`ing entries that have not been processed for -some time): +It is also possible to filter pending stream entries by their idle-time, +given in milliseconds (useful for `XCLAIM`ing entries that have not been +processed for some time): ``` > XPENDING mystream group55 IDLE 9000 - + 10 > XPENDING mystream group55 IDLE 9000 - + 10 consumer-123 ``` -The first case will return the first 10 (or less) PEL entries of the entire -group that are idle for over 9 seconds, whereas in the second case only those of +The first case will return the first 10 (or less) PEL entries of the entire group +that are idle for over 9 seconds, whereas in the second case only those of `consumer-123`. 
## Exclusive ranges and iterating the PEL @@ -131,10 +132,6 @@ command. @array-reply, specifically: -The command returns data in different format depending on the way it is called, -as previously explained in this page. However the reply is always an array of -items. - -@history - -- `>= 6.2.0`: Added the `IDLE` option and exclusive range intervals. +The command returns data in different format depending on the way it is +called, as previously explained in this page. However the reply is always +an array of items. diff --git a/iredis/data/commands/xrange.md b/iredis/data/commands/xrange.md index fc0337e..fc6d11d 100644 --- a/iredis/data/commands/xrange.md +++ b/iredis/data/commands/xrange.md @@ -1,26 +1,26 @@ -The command returns the stream entries matching a given range of IDs. The range -is specified by a minimum and maximum ID. All the entries having an ID between -the two specified or exactly one of the two IDs specified (closed interval) are -returned. +The command returns the stream entries matching a given range of IDs. +The range is specified by a minimum and maximum ID. All the entries having +an ID between the two specified or exactly one of the two IDs specified +(closed interval) are returned. The `XRANGE` command has a number of applications: -- Returning items in a specific time range. This is possible because Stream IDs - are [related to time](/topics/streams-intro). -- Iterating a stream incrementally, returning just a few items at every - iteration. However it is semantically much more robust than the `SCAN` family - of functions. -- Fetching a single entry from a stream, providing the ID of the entry to fetch - two times: as start and end of the query interval. +* Returning items in a specific time range. This is possible because + Stream IDs are [related to time](/topics/streams-intro). +* Iterating a stream incrementally, returning just + a few items at every iteration. 
However it is semantically much more + robust than the `SCAN` family of functions. +* Fetching a single entry from a stream, providing the ID of the entry + to fetch two times: as start and end of the query interval. -The command also has a reciprocal command returning items in the reverse order, -called `XREVRANGE`, which is otherwise identical. +The command also has a reciprocal command returning items in the +reverse order, called `XREVRANGE`, which is otherwise identical. ## `-` and `+` special IDs -The `-` and `+` special IDs mean respectively the minimum ID possible and the -maximum ID possible inside a stream, so the following command will just return -every entry in the stream: +The `-` and `+` special IDs mean respectively the minimum ID possible +and the maximum ID possible inside a stream, so the following command +will just return every entry in the stream: ``` > XRANGE somestream - + @@ -41,45 +41,45 @@ every entry in the stream: ... other entries here ... ``` -The `-` ID is effectively just exactly as specifying `0-0`, while `+` is -equivalent to `18446744073709551615-18446744073709551615`, however they are -nicer to type. +The `-` ID is effectively just exactly as specifying `0-0`, while +`+` is equivalent to `18446744073709551615-18446744073709551615`, however +they are nicer to type. ## Incomplete IDs Stream IDs are composed of two parts, a Unix millisecond time stamp and a -sequence number for entries inserted in the same millisecond. It is possible to -use `XRANGE` specifying just the first part of the ID, the millisecond time, +sequence number for entries inserted in the same millisecond. 
It is possible +to use `XRANGE` specifying just the first part of the ID, the millisecond time, like in the following example: ``` > XRANGE somestream 1526985054069 1526985055069 ``` -In this case, `XRANGE` will auto-complete the start interval with `-0` and end -interval with `-18446744073709551615`, in order to return all the entries that -were generated between a given millisecond and the end of the other specified -millisecond. This also means that repeating the same millisecond two times, we -get all the entries within such millisecond, because the sequence number range -will be from zero to the maximum. +In this case, `XRANGE` will auto-complete the start interval with `-0` +and end interval with `-18446744073709551615`, in order to return all the +entries that were generated between a given millisecond and the end of +the other specified millisecond. This also means that repeating the same +millisecond two times, we get all the entries within such millisecond, +because the sequence number range will be from zero to the maximum. -Used in this way `XRANGE` works as a range query command to obtain entries in a -specified time. This is very handy in order to access the history of past events -in a stream. +Used in this way `XRANGE` works as a range query command to obtain entries +in a specified time. This is very handy in order to access the history +of past events in a stream. ## Exclusive ranges The range is close (inclusive) by default, meaning that the reply can include -entries with IDs matching the query's start and end intervals. It is possible to -specify an open interval (exclusive) by prefixing the ID with the character `(`. -This is useful for iterating the stream, as explained below. +entries with IDs matching the query's start and end intervals. It is possible +to specify an open interval (exclusive) by prefixing the ID with the +character `(`. This is useful for iterating the stream, as explained below. 
## Returning a maximum number of entries Using the **COUNT** option it is possible to reduce the number of entries -reported. This is a very important feature even if it may look marginal, because -it allows, for instance, to model operations such as _give me the entry greater -or equal to the following_: +reported. This is a very important feature even if it may look marginal, +because it allows, for instance, to model operations such as *give me +the entry greater or equal to the following*: ``` > XRANGE somestream 1526985054069-0 + COUNT 1 @@ -92,15 +92,15 @@ or equal to the following_: 6) "839248" ``` -In the above case the entry `1526985054069-0` exists, otherwise the server would -have sent us the next one. Using `COUNT` is also the base in order to use -`XRANGE` as an iterator. +In the above case the entry `1526985054069-0` exists, otherwise the server +would have sent us the next one. Using `COUNT` is also the base in order to +use `XRANGE` as an iterator. ## Iterating a stream -In order to iterate a stream, we can proceed as follows. Let's assume that we -want two elements per iteration. We start fetching the first two elements, which -is trivial: +In order to iterate a stream, we can proceed as follows. Let's assume that +we want two elements per iteration. We start fetching the first two +elements, which is trivial: ``` > XRANGE writers - + COUNT 2 @@ -116,12 +116,12 @@ is trivial: 4) "Austen" ``` -Then instead of starting the iteration again from `-`, as the start of the range -we use the entry ID of the _last_ entry returned by the previous `XRANGE` call -as an exclusive interval. +Then instead of starting the iteration again from `-`, as the start +of the range we use the entry ID of the *last* entry returned by the +previous `XRANGE` call as an exclusive interval. 
-The ID of the last entry is `1526985685298-0`, so we just prefix it with a '(', -and continue our iteration: +The ID of the last entry is `1526985685298-0`, so we just prefix it +with a '(', and continue our iteration: ``` > XRANGE writers (1526985685298-0 + COUNT 2 @@ -137,14 +137,15 @@ and continue our iteration: 4) "Christie" ``` -And so forth. Eventually this will allow to visit all the entries in the stream. -Obviously, we can start the iteration from any ID, or even from a specific time, -by providing a given incomplete start ID. Moreover, we can limit the iteration -to a given ID or time, by providing an end ID or incomplete ID instead of `+`. +And so forth. Eventually this will allow to visit all the entries in the +stream. Obviously, we can start the iteration from any ID, or even from +a specific time, by providing a given incomplete start ID. Moreover, we +can limit the iteration to a given ID or time, by providing an end +ID or incomplete ID instead of `+`. -The command `XREAD` is also able to iterate the stream. The command `XREVRANGE` -can iterate the stream reverse, from higher IDs (or times) to lower IDs (or -times). +The command `XREAD` is also able to iterate the stream. +The command `XREVRANGE` can iterate the stream reverse, from higher IDs +(or times) to lower IDs (or times). ### Iterating with earlier versions of Redis @@ -167,21 +168,22 @@ The second call would, therefore, be: ... ``` -Also, note that once the sequence part of the last ID equals +Also, note that once the sequence part of the last ID equals 18446744073709551615, you'll need to increment the timestamp and reset the sequence part to 0. For example, incrementing the ID `1526985685298-18446744073709551615` should result in `1526985685299-0`. -A symmetrical pattern applies to iterating the stream with `XREVRANGE`. The only -difference is that the client needs to decrement the ID for the subsequent -calls. 
When decrementing an ID with a sequence part of 0, the timestamp needs to -be decremented by 1 and the sequence set to 18446744073709551615. +A symmetrical pattern applies to iterating the stream with `XREVRANGE`. The +only difference is that the client needs to decrement the ID for the subsequent +calls. When decrementing an ID with a sequence part of 0, the timestamp needs +to be decremented by 1 and the sequence set to 18446744073709551615. ## Fetching single items -If you look for an `XGET` command you'll be disappointed because `XRANGE` is -effectively the way to go in order to fetch a single entry from a stream. All -you have to do is to specify the ID two times in the arguments of XRANGE: +If you look for an `XGET` command you'll be disappointed because `XRANGE` +is effectively the way to go in order to fetch a single entry from a +stream. All you have to do is to specify the ID two times in the arguments +of XRANGE: ``` > XRANGE mystream 1526984818136-0 1526984818136-0 @@ -203,14 +205,10 @@ For further information about Redis streams please check our @array-reply, specifically: -The command returns the entries with IDs matching the specified range. The -returned entries are complete, that means that the ID and all the fields they -are composed are returned. Moreover, the entries are returned with their fields -and values in the exact same order as `XADD` added them. - -@history - -- `>= 6.2` Added exclusive ranges. +The command returns the entries with IDs matching the specified range. +The returned entries are complete, that means that the ID and all the fields +they are composed are returned. Moreover, the entries are returned with +their fields and values in the exact same order as `XADD` added them. 
@examples diff --git a/iredis/data/commands/xread.md b/iredis/data/commands/xread.md index 6a45fb4..ea0f311 100644 --- a/iredis/data/commands/xread.md +++ b/iredis/data/commands/xread.md @@ -1,33 +1,33 @@ -Read data from one or multiple streams, only returning entries with an ID -greater than the last received ID reported by the caller. This command has an -option to block if items are not available, in a similar fashion to `BRPOP` or -`BZPOPMIN` and others. +Read data from one or multiple streams, only returning entries with an +ID greater than the last received ID reported by the caller. +This command has an option to block if items are not available, in a similar +fashion to `BRPOP` or `BZPOPMIN` and others. -Please note that before reading this page, if you are new to streams, we -recommend to read [our introduction to Redis Streams](/topics/streams-intro). +Please note that before reading this page, if you are new to streams, +we recommend to read [our introduction to Redis Streams](/topics/streams-intro). ## Non-blocking usage -If the **BLOCK** option is not used, the command is synchronous, and can be -considered somewhat related to `XRANGE`: it will return a range of items inside -streams, however it has two fundamental differences compared to `XRANGE` even if -we just consider the synchronous usage: +If the **BLOCK** option is not used, the command is synchronous, and can +be considered somewhat related to `XRANGE`: it will return a range of items +inside streams, however it has two fundamental differences compared to `XRANGE` +even if we just consider the synchronous usage: -- This command can be called with multiple streams if we want to read at the - same time from a number of keys. This is a key feature of `XREAD` because +* This command can be called with multiple streams if we want to read at + the same time from a number of keys. 
This is a key feature of `XREAD` because especially when blocking with **BLOCK**, to be able to listen with a single connection to multiple keys is a vital feature. -- While `XRANGE` returns items in a range of IDs, `XREAD` is more suited in +* While `XRANGE` returns items in a range of IDs, `XREAD` is more suited in order to consume the stream starting from the first entry which is greater than any other entry we saw so far. So what we pass to `XREAD` is, for each stream, the ID of the last element that we received from that stream. -For example, if I have two streams `mystream` and `writers`, and I want to read -data from both the streams starting from the first element they contain, I could -call `XREAD` like in the following example. +For example, if I have two streams `mystream` and `writers`, and I want to +read data from both the streams starting from the first element they contain, +I could call `XREAD` like in the following example. -Note: we use the **COUNT** option in the example, so that for each stream the -call will return at maximum two elements per stream. +Note: we use the **COUNT** option in the example, so that for each stream +the call will return at maximum two elements per stream. ``` > XREAD COUNT 2 STREAMS mystream writers 0-0 0-0 @@ -59,18 +59,18 @@ call will return at maximum two elements per stream. 4) "Austen" ``` -The **STREAMS** option is mandatory and MUST be the final option because such -option gets a variable length of argument in the following format: +The **STREAMS** option is mandatory and MUST be the final option because +such option gets a variable length of argument in the following format: STREAMS key_1 key_2 key_3 ... key_N ID_1 ID_2 ID_3 ... ID_N -So we start with a list of keys, and later continue with all the associated IDs, -representing _the last ID we received for that stream_, so that the call will -serve us only greater IDs from the same stream. 
+So we start with a list of keys, and later continue with all the associated +IDs, representing *the last ID we received for that stream*, so that the +call will serve us only greater IDs from the same stream. -For instance in the above example, the last items that we received for the -stream `mystream` has ID `1526999352406-0`, while for the stream `writers` has -the ID `1526985685298-0`. +For instance in the above example, the last items that we received +for the stream `mystream` has ID `1526999352406-0`, while for the +stream `writers` has the ID `1526985685298-0`. To continue iterating the two streams I'll call: @@ -97,16 +97,16 @@ To continue iterating the two streams I'll call: 4) "Christie" ``` -And so forth. Eventually, the call will not return any item, but just an empty -array, then we know that there is nothing more to fetch from our stream (and we -would have to retry the operation, hence this command also supports a blocking -mode). +And so forth. Eventually, the call will not return any item, but just an +empty array, then we know that there is nothing more to fetch from our +stream (and we would have to retry the operation, hence this command +also supports a blocking mode). ## Incomplete IDs -To use incomplete IDs is valid, like it is valid for `XRANGE`. However here the -sequence part of the ID, if missing, is always interpreted as zero, so the -command: +To use incomplete IDs is valid, like it is valid for `XRANGE`. However +here the sequence part of the ID, if missing, is always interpreted as +zero, so the command: ``` > XREAD COUNT 2 STREAMS mystream writers 0 0 @@ -120,31 +120,33 @@ is exactly equivalent to ## Blocking for data -In its synchronous form, the command can get new data as long as there are more -items available. However, at some point, we'll have to wait for producers of -data to use `XADD` to push new entries inside the streams we are consuming. 
In -order to avoid polling at a fixed or adaptive interval the command is able to -block if it could not return any data, according to the specified streams and -IDs, and automatically unblock once one of the requested keys accept data. - -It is important to understand that this command _fans out_ to all the clients -that are waiting for the same range of IDs, so every consumer will get a copy of -the data, unlike to what happens when blocking list pop operations are used. - -In order to block, the **BLOCK** option is used, together with the number of -milliseconds we want to block before timing out. Normally Redis blocking +In its synchronous form, the command can get new data as long as there +are more items available. However, at some point, we'll have to wait for +producers of data to use `XADD` to push new entries inside the streams +we are consuming. In order to avoid polling at a fixed or adaptive interval +the command is able to block if it could not return any data, according +to the specified streams and IDs, and automatically unblock once one of +the requested keys accept data. + +It is important to understand that this command *fans out* to all the +clients that are waiting for the same range of IDs, so every consumer will +get a copy of the data, unlike to what happens when blocking list pop +operations are used. + +In order to block, the **BLOCK** option is used, together with the number +of milliseconds we want to block before timing out. Normally Redis blocking commands take timeouts in seconds, however this command takes a millisecond -timeout, even if normally the server will have a timeout resolution near to 0.1 -seconds. This time it is possible to block for a shorter time in certain use -cases, and if the server internals will improve over time, it is possible that -the resolution of timeouts will improve. +timeout, even if normally the server will have a timeout resolution near +to 0.1 seconds. 
This time it is possible to block for a shorter time in +certain use cases, and if the server internals will improve over time, it is +possible that the resolution of timeouts will improve. -When the **BLOCK** command is passed, but there is data to return at least in -one of the streams passed, the command is executed synchronously _exactly like -if the BLOCK option would be missing_. +When the **BLOCK** command is passed, but there is data to return at +least in one of the streams passed, the command is executed synchronously +*exactly like if the BLOCK option would be missing*. -This is an example of blocking invocation, where the command later returns a -null reply because the timeout has elapsed without new data arriving: +This is an example of blocking invocation, where the command later returns +a null reply because the timeout has elapsed without new data arriving: ``` > XREAD BLOCK 1000 STREAMS mystream 1526999626221-0 @@ -153,21 +155,21 @@ null reply because the timeout has elapsed without new data arriving: ## The special `$` ID. -When blocking sometimes we want to receive just entries that are added to the -stream via `XADD` starting from the moment we block. In such a case we are not -interested in the history of already added entries. For this use case, we would -have to check the stream top element ID, and use such ID in the `XREAD` command -line. This is not clean and requires to call other commands, so instead it is -possible to use the special `$` ID to signal the stream that we want only the -new things. +When blocking sometimes we want to receive just entries that are added +to the stream via `XADD` starting from the moment we block. In such a case +we are not interested in the history of already added entries. For +this use case, we would have to check the stream top element ID, and use +such ID in the `XREAD` command line. 
This is not clean and requires to +call other commands, so instead it is possible to use the special `$` +ID to signal the stream that we want only the new things. -It is **very important** to understand that you should use the `$` ID only for -the first call to `XREAD`. Later the ID should be the one of the last reported -item in the stream, otherwise you could miss all the entries that are added in -between. +It is **very important** to understand that you should use the `$` +ID only for the first call to `XREAD`. Later the ID should be the one +of the last reported item in the stream, otherwise you could miss all +the entries that are added in between. -This is how a typical `XREAD` call looks like in the first iteration of a -consumer willing to consume only new entries: +This is how a typical `XREAD` call looks like in the first iteration +of a consumer willing to consume only new entries: ``` > XREAD BLOCK 5000 COUNT 100 STREAMS mystream $ @@ -183,28 +185,31 @@ And so forth. ## How multiple clients blocked on a single stream are served -Blocking list operations on lists or sorted sets have a _pop_ behavior. -Basically, the element is removed from the list or sorted set in order to be -returned to the client. In this scenario you want the items to be consumed in a -fair way, depending on the moment clients blocked on a given key arrived. -Normally Redis uses the FIFO semantics in this use cases. +Blocking list operations on lists or sorted sets have a *pop* behavior. +Basically, the element is removed from the list or sorted set in order +to be returned to the client. In this scenario you want the items +to be consumed in a fair way, depending on the moment clients blocked +on a given key arrived. Normally Redis uses the FIFO semantics in this +use cases. 
-However note that with streams this is not a problem: stream entries are not -removed from the stream when clients are served, so every client waiting will be -served as soon as an `XADD` command provides data to the stream. +However note that with streams this is not a problem: stream entries +are not removed from the stream when clients are served, so every +client waiting will be served as soon as an `XADD` command provides +data to the stream. @return @array-reply, specifically: -The command returns an array of results: each element of the returned array is -an array composed of a two element containing the key name and the entries -reported for that key. The entries reported are full stream entries, having IDs -and the list of all the fields and values. Field and values are guaranteed to be -reported in the same order they were added by `XADD`. +The command returns an array of results: each element of the returned +array is an array composed of a two element containing the key name and +the entries reported for that key. The entries reported are full stream +entries, having IDs and the list of all the fields and values. Field and +values are guaranteed to be reported in the same order they were added +by `XADD`. When **BLOCK** is used, on timeout a null reply is returned. Reading the [Redis Streams introduction](/topics/streams-intro) is highly -suggested in order to understand more about the streams overall behavior and -semantics. +suggested in order to understand more about the streams overall behavior +and semantics. diff --git a/iredis/data/commands/xreadgroup.md b/iredis/data/commands/xreadgroup.md index f9201e4..4b516e5 100644 --- a/iredis/data/commands/xreadgroup.md +++ b/iredis/data/commands/xreadgroup.md @@ -1,103 +1,74 @@ -The `XREADGROUP` command is a special version of the `XREAD` command with -support for consumer groups. Probably you will have to understand the `XREAD` -command before reading this page will makes sense. 
+The `XREADGROUP` command is a special version of the `XREAD` command
+with support for consumer groups. Probably you will have to understand the
+`XREAD` command before reading this page makes sense.

 Moreover, if you are new to streams, we recommend to read our
-[introduction to Redis Streams](/topics/streams-intro). Make sure to understand
-the concept of consumer group in the introduction so that following how this
-command works will be simpler.
+[introduction to Redis Streams](/topics/streams-intro).
+Make sure to understand the concept of consumer group in the introduction
+so that following how this command works will be simpler.

 ## Consumer groups in 30 seconds

-The difference between this command and the vanilla `XREAD` is that this one
-supports consumer groups.
-
-Without consumer groups, just using `XREAD`, all the clients are served with all
-the entries arriving in a stream. Instead using consumer groups with
-`XREADGROUP`, it is possible to create groups of clients that consume different
-parts of the messages arriving in a given stream. If, for instance, the stream
-gets the new entries A, B, and C and there are two consumers reading via a
-consumer group, one client will get, for instance, the messages A and C, and the
-other the message B, and so forth.
-
-Within a consumer group, a given consumer (that is, just a client consuming
-messages from the stream), has to identify with an unique _consumer name_. Which
-is just a string.
-
-One of the guarantees of consumer groups is that a given consumer can only see
-the history of messages that were delivered to it, so a message has just a
-single owner. However there is a special feature called _message claiming_ that
-allows other consumers to claim messages in case there is a non recoverable
-failure of some consumer. In order to implement such semantics, consumer groups
-require explicit acknowledgment of the messages successfully processed by the
-consumer, via the `XACK` command. 
This is needed because the stream will track, -for each consumer group, who is processing what message. +The difference between this command and the vanilla `XREAD` is that this +one supports consumer groups. + +Without consumer groups, just using `XREAD`, all the clients are served with all the entries arriving in a stream. Instead using consumer groups with `XREADGROUP`, it is possible to create groups of clients that consume different parts of the messages arriving in a given stream. If, for instance, the stream gets the new entries A, B, and C and there are two consumers reading via a consumer group, one client will get, for instance, the messages A and C, and the other the message B, and so forth. + +Within a consumer group, a given consumer (that is, just a client consuming messages from the stream), has to identify with a unique *consumer name*. Which is just a string. + +One of the guarantees of consumer groups is that a given consumer can only see the history of messages that were delivered to it, so a message has just a single owner. However there is a special feature called *message claiming* that allows other consumers to claim messages in case there is a non recoverable failure of some consumer. In order to implement such semantics, consumer groups require explicit acknowledgment of the messages successfully processed by the consumer, via the `XACK` command. This is needed because the stream will track, for each consumer group, who is processing what message. This is how to understand if you want to use a consumer group or not: -1. If you have a stream and multiple clients, and you want all the clients to - get all the messages, you do not need a consumer group. -2. If you have a stream and multiple clients, and you want the stream to be - _partitioned_ or _sharded_ across your clients, so that each client will get - a sub set of the messages arriving in a stream, you need a consumer group. +1. 
If you have a stream and multiple clients, and you want all the clients to get all the messages, you do not need a consumer group.
+2. If you have a stream and multiple clients, and you want the stream to be *partitioned* or *sharded* across your clients, so that each client will get a subset of the messages arriving in a stream, you need a consumer group.

 ## Differences between XREAD and XREADGROUP

-From the point of view of the syntax, the commands are almost the same, however
-`XREADGROUP` _requires_ a special and mandatory option:
+From the point of view of the syntax, the commands are almost the same,
+however `XREADGROUP` *requires* a special and mandatory option:

     GROUP <group-name> <consumer-name>

 The group name is just the name of a consumer group associated to the stream.
-The group is created using the `XGROUP` command. The consumer name is the string
-that is used by the client to identify itself inside the group. The consumer is
-auto created inside the consumer group the first time it is saw. Different
-clients should select a different consumer name.
+The group is created using the `XGROUP` command. The consumer name is the
+string that is used by the client to identify itself inside the group.
+The consumer is auto created inside the consumer group the first time it
+is seen. Different clients should select a different consumer name.

-When you read with `XREADGROUP`, the server will _remember_ that a given message
-was delivered to you: the message will be stored inside the consumer group in
-what is called a Pending Entries List (PEL), that is a list of message IDs
-delivered but not yet acknowledged.
+When you read with `XREADGROUP`, the server will *remember* that a given
+message was delivered to you: the message will be stored inside the
+consumer group in what is called a Pending Entries List (PEL), that is
+a list of message IDs delivered but not yet acknowledged. 
-The client will have to acknowledge the message processing using `XACK` in order
-for the pending entry to be removed from the PEL. The PEL can be inspected using
-the `XPENDING` command.
+The client will have to acknowledge the message processing using `XACK`
+in order for the pending entry to be removed from the PEL. The PEL
+can be inspected using the `XPENDING` command.

 The `NOACK` subcommand can be used to avoid adding the message to the PEL in
-cases where reliability is not a requirement and the occasional message loss is
-acceptable. This is equivalent to acknowledging the message when it is read.
+cases where reliability is not a requirement and the occasional message loss
+is acceptable. This is equivalent to acknowledging the message when it is read.

-The ID to specify in the **STREAMS** option when using `XREADGROUP` can be one
-of the following two:
+The ID to specify in the **STREAMS** option when using `XREADGROUP` can
+be one of the following two:

-- The special `>` ID, which means that the consumer want to receive only
-  messages that were _never delivered to any other consumer_. It just means,
-  give me new messages.
-- Any other ID, that is, 0 or any other valid ID or incomplete ID (just the
-  millisecond time part), will have the effect of returning entries that are
-  pending for the consumer sending the command with IDs greater than the one
-  provided. So basically if the ID is not `>`, then the command will just let
-  the client access its pending entries: messages delivered to it, but not yet
-  acknowledged. Note that in this case, both `BLOCK` and `NOACK` are ignored.
+* The special `>` ID, which means that the consumer wants to receive only messages that were *never delivered to any other consumer*. It just means, give me new messages. 
+* Any other ID, that is, 0 or any other valid ID or incomplete ID (just the millisecond time part), will have the effect of returning entries that are pending for the consumer sending the command with IDs greater than the one provided. So basically if the ID is not `>`, then the command will just let the client access its pending entries: messages delivered to it, but not yet acknowledged. Note that in this case, both `BLOCK` and `NOACK` are ignored. -Like `XREAD` the `XREADGROUP` command can be used in a blocking way. There are -no differences in this regard. +Like `XREAD` the `XREADGROUP` command can be used in a blocking way. There +are no differences in this regard. ## What happens when a message is delivered to a consumer? Two things: -1. If the message was never delivered to anyone, that is, if we are talking - about a new message, then a PEL (Pending Entries List) is created. -2. If instead the message was already delivered to this consumer, and it is just - re-fetching the same message again, then the _last delivery counter_ is - updated to the current time, and the _number of deliveries_ is incremented by - one. You can access those message properties using the `XPENDING` command. +1. If the message was never delivered to anyone, that is, if we are talking about a new message, then a PEL (Pending Entries List) is created. +2. If instead the message was already delivered to this consumer, and it is just re-fetching the same message again, then the *last delivery counter* is updated to the current time, and the *number of deliveries* is incremented by one. You can access those message properties using the `XPENDING` command. ## Usage example -Normally you use the command like that in order to get new messages and process -them. In pseudo-code: +Normally you use the command like that in order to get new messages and +process them. 
In pseudo-code: ``` WHILE true @@ -119,29 +90,58 @@ END ``` In this way the example consumer code will fetch only new messages, process -them, and acknowledge them via `XACK`. However the example code above is not -complete, because it does not handle recovering after a crash. What will happen -if we crash in the middle of processing messages, is that our messages will -remain in the pending entries list, so we can access our history by giving -`XREADGROUP` initially an ID of 0, and performing the same loop. Once providing -an ID of 0 the reply is an empty set of messages, we know that we processed and -acknowledged all the pending messages: we can start to use `>` as ID, in order -to get the new messages and rejoin the consumers that are processing new things. +them, and acknowledge them via `XACK`. However the example code above is +not complete, because it does not handle recovering after a crash. What +will happen if we crash in the middle of processing messages, is that our +messages will remain in the pending entries list, so we can access our +history by giving `XREADGROUP` initially an ID of 0, and performing the same +loop. Once providing an ID of 0 the reply is an empty set of messages, we +know that we processed and acknowledged all the pending messages: we +can start to use `>` as ID, in order to get the new messages and rejoin the +consumers that are processing new things. To see how the command actually replies, please check the `XREAD` command page. +## What happens when a pending message is deleted? + +Entries may be deleted from the stream due to trimming or explicit calls to `XDEL` at any time. +By design, Redis doesn't prevent the deletion of entries that are present in the stream's PELs. +When this happens, the PELs retain the deleted entries' IDs, but the actual entry payload is no longer available. +Therefore, when reading such PEL entries, Redis will return a null value in place of their respective data. 
+
+Example:
+
+```
+> XADD mystream 1 myfield mydata
+"1-0"
+> XGROUP CREATE mystream mygroup 0
+OK
+> XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
+1) 1) "mystream"
+   2) 1) 1) "1-0"
+         2) 1) "myfield"
+            2) "mydata"
+> XDEL mystream 1-0
+(integer) 1
+> XREADGROUP GROUP mygroup myconsumer STREAMS mystream 0
+1) 1) "mystream"
+   2) 1) 1) "1-0"
+         2) (nil)
+```
+
 @return

 @array-reply, specifically:

-The command returns an array of results: each element of the returned array is
-an array composed of a two element containing the key name and the entries
-reported for that key. The entries reported are full stream entries, having IDs
-and the list of all the fields and values. Field and values are guaranteed to be
-reported in the same order they were added by `XADD`.
+The command returns an array of results: each element of the returned
+array is an array composed of two elements containing the key name and
+the entries reported for that key. The entries reported are full stream
+entries, having IDs and the list of all the fields and values. Field and
+values are guaranteed to be reported in the same order they were added
+by `XADD`.

 When **BLOCK** is used, on timeout a null reply is returned.

 Reading the [Redis Streams introduction](/topics/streams-intro) is highly
-suggested in order to understand more about the streams overall behavior and
-semantics.
+suggested in order to understand more about the streams overall behavior
+and semantics. 
diff --git a/iredis/data/commands/xrevrange.md b/iredis/data/commands/xrevrange.md index e7c317d..d61b3f5 100644 --- a/iredis/data/commands/xrevrange.md +++ b/iredis/data/commands/xrevrange.md @@ -1,16 +1,16 @@ This command is exactly like `XRANGE`, but with the notable difference of -returning the entries in reverse order, and also taking the start-end range in -reverse order: in `XREVRANGE` you need to state the _end_ ID and later the -_start_ ID, and the command will produce all the element between (or exactly -like) the two IDs, starting from the _end_ side. +returning the entries in reverse order, and also taking the start-end +range in reverse order: in `XREVRANGE` you need to state the *end* ID +and later the *start* ID, and the command will produce all the element +between (or exactly like) the two IDs, starting from the *end* side. -So for instance, to get all the elements from the higher ID to the lower ID one -could use: +So for instance, to get all the elements from the higher ID to the lower +ID one could use: XREVRANGE somestream + - -Similarly to get just the last element added into the stream it is enough to -send: +Similarly to get just the last element added into the stream it is +enough to send: XREVRANGE somestream + - COUNT 1 @@ -18,15 +18,11 @@ send: @array-reply, specifically: -The command returns the entries with IDs matching the specified range, from the -higher ID to the lower ID matching. The returned entries are complete, that -means that the ID and all the fields they are composed are returned. Moreover -the entries are returned with their fields and values in the exact same order as -`XADD` added them. - -@history - -- `>= 6.2` Added exclusive ranges. +The command returns the entries with IDs matching the specified range, +from the higher ID to the lower ID matching. +The returned entries are complete, that means that the ID and all the fields +they are composed are returned. 
Moreover the entries are returned with +their fields and values in the exact same order as `XADD` added them. @examples diff --git a/iredis/data/commands/xsetid.md b/iredis/data/commands/xsetid.md new file mode 100644 index 0000000..39b593c --- /dev/null +++ b/iredis/data/commands/xsetid.md @@ -0,0 +1,2 @@ +The `XSETID` command is an internal command. +It is used by a Redis master to replicate the last delivered ID of streams.
\ No newline at end of file diff --git a/iredis/data/commands/xtrim.md b/iredis/data/commands/xtrim.md index 0903442..08d55d5 100644 --- a/iredis/data/commands/xtrim.md +++ b/iredis/data/commands/xtrim.md @@ -1,12 +1,9 @@ -`XTRIM` trims the stream by evicting older entries (entries with lower IDs) if -needed. +`XTRIM` trims the stream by evicting older entries (entries with lower IDs) if needed. Trimming the stream can be done using one of these strategies: -- `MAXLEN`: Evicts entries as long as the stream's length exceeds the specified - `threshold`, where `threshold` is a positive integer. -- `MINID`: Evicts entries with IDs lower than `threshold`, where `threshold` is - a stream ID. +* `MAXLEN`: Evicts entries as long as the stream's length exceeds the specified `threshold`, where `threshold` is a positive integer. +* `MINID`: Evicts entries with IDs lower than `threshold`, where `threshold` is a stream ID. For example, this will trim the stream to exactly the latest 1000 items: @@ -14,27 +11,23 @@ For example, this will trim the stream to exactly the latest 1000 items: XTRIM mystream MAXLEN 1000 ``` -Whereas in this example, all entries that have an ID lower than 649085820-0 will -be evicted: +Whereas in this example, all entries that have an ID lower than 649085820-0 will be evicted: ``` XTRIM mystream MINID 649085820 ``` -By default, or when provided with the optional `=` argument, the command -performs exact trimming. +By default, or when provided with the optional `=` argument, the command performs exact trimming. Depending on the strategy, exact trimming means: -- `MAXLEN`: the trimmed stream's length will be exactly the minimum between its - original length and the specified `threshold`. -- `MINID`: the oldest ID in the stream will be exactly the minimum between its - original oldest ID and the specified `threshold`. +* `MAXLEN`: the trimmed stream's length will be exactly the minimum between its original length and the specified `threshold`. 
+* `MINID`: the oldest ID in the stream will be exactly the maximum between its original oldest ID and the specified `threshold`. -## Nearly exact trimming +Nearly exact trimming +--- -Because exact trimming may require additional effort from the Redis server, the -optional `~` argument can be provided to make it more efficient. +Because exact trimming may require additional effort from the Redis server, the optional `~` argument can be provided to make it more efficient. For example: @@ -42,29 +35,19 @@ For example: XTRIM mystream MAXLEN ~ 1000 ``` -The `~` argument between the `MAXLEN` strategy and the `threshold` means that -the user is requesting to trim the stream so its length is **at least** the -`threshold`, but possibly slightly more. In this case, Redis will stop trimming -early when performance can be gained (for example, when a whole macro node in -the data structure can't be removed). This makes trimming much more efficient, -and it is usually what you want, although after trimming, the stream may have -few tens of additional entries over the `threshold`. - -Another way to control the amount of work done by the command when using the -`~`, is the `LIMIT` clause. When used, it specifies the maximal `count` of -entries that will be evicted. When `LIMIT` and `count` aren't specified, the -default value of 100 \* the number of entries in a macro node will be implicitly -used as the `count`. Specifying the value 0 as `count` disables the limiting -mechanism entirely. +The `~` argument between the `MAXLEN` strategy and the `threshold` means that the user is requesting to trim the stream so its length is **at least** the `threshold`, but possibly slightly more. +In this case, Redis will stop trimming early when performance can be gained (for example, when a whole macro node in the data structure can't be removed). 
+This makes trimming much more efficient, and it is usually what you want, although after trimming, the stream may have few tens of additional entries over the `threshold`. + +Another way to control the amount of work done by the command when using the `~`, is the `LIMIT` clause. +When used, it specifies the maximal `count` of entries that will be evicted. +When `LIMIT` and `count` aren't specified, the default value of 100 * the number of entries in a macro node will be implicitly used as the `count`. +Specifying the value 0 as `count` disables the limiting mechanism entirely. @return @integer-reply: The number of entries deleted from the stream. -@history - -- `>= 6.2`: Added the `MINID` trimming strategy and the `LIMIT` option. - @examples ```cli diff --git a/iredis/data/commands/zadd.md b/iredis/data/commands/zadd.md index f14b085..eb77de6 100644 --- a/iredis/data/commands/zadd.md +++ b/iredis/data/commands/zadd.md @@ -1,55 +1,44 @@ Adds all the specified members with the specified scores to the sorted set -stored at `key`. It is possible to specify multiple score / member pairs. If a -specified member is already a member of the sorted set, the score is updated and -the element reinserted at the right position to ensure the correct ordering. +stored at `key`. +It is possible to specify multiple score / member pairs. +If a specified member is already a member of the sorted set, the score is +updated and the element reinserted at the right position to ensure the correct +ordering. If `key` does not exist, a new sorted set with the specified members as sole -members is created, like if the sorted set was empty. If the key exists but does -not hold a sorted set, an error is returned. +members is created, like if the sorted set was empty. If the key exists but does not hold a sorted set, an error is returned. -The score values should be the string representation of a double precision -floating point number. `+inf` and `-inf` values are valid values as well. 
+The score values should be the string representation of a double precision floating point number. `+inf` and `-inf` values are valid values as well. -## ZADD options +ZADD options +--- ZADD supports a list of options, specified after the name of the key and before the first score argument. Options are: -- **XX**: Only update elements that already exist. Don't add new elements. -- **NX**: Only add new elements. Don't update already existing elements. -- **LT**: Only update existing elements if the new score is **less than** the - current score. This flag doesn't prevent adding new elements. -- **GT**: Only update existing elements if the new score is **greater than** the - current score. This flag doesn't prevent adding new elements. -- **CH**: Modify the return value from the number of new elements added, to the - total number of elements changed (CH is an abbreviation of _changed_). Changed - elements are **new elements added** and elements already existing for which - **the score was updated**. So elements specified in the command line having - the same score as they had in the past are not counted. Note: normally the - return value of `ZADD` only counts the number of new elements added. -- **INCR**: When this option is specified `ZADD` acts like `ZINCRBY`. Only one - score-element pair can be specified in this mode. +* **XX**: Only update elements that already exist. Don't add new elements. +* **NX**: Only add new elements. Don't update already existing elements. +* **LT**: Only update existing elements if the new score is **less than** the current score. This flag doesn't prevent adding new elements. +* **GT**: Only update existing elements if the new score is **greater than** the current score. This flag doesn't prevent adding new elements. +* **CH**: Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of *changed*). 
Changed elements are **new elements added** and elements already existing for which **the score was updated**. So elements specified in the command line having the same score as they had in the past are not counted. Note: normally the return value of `ZADD` only counts the number of new elements added. +* **INCR**: When this option is specified `ZADD` acts like `ZINCRBY`. Only one score-element pair can be specified in this mode. Note: The **GT**, **LT** and **NX** options are mutually exclusive. -## Range of integer scores that can be expressed precisely +Range of integer scores that can be expressed precisely +--- -Redis sorted sets use a _double 64-bit floating point number_ to represent the -score. In all the architectures we support, this is represented as an **IEEE 754 -floating point number**, that is able to represent precisely integer numbers -between `-(2^53)` and `+(2^53)` included. In more practical terms, all the -integers between -9007199254740992 and 9007199254740992 are perfectly -representable. Larger integers, or fractions, are internally represented in -exponential form, so it is possible that you get only an approximation of the -decimal number, or of the very big integer, that you set as score. +Redis sorted sets use a *double 64-bit floating point number* to represent the score. In all the architectures we support, this is represented as an **IEEE 754 floating point number**, that is able to represent precisely integer numbers between `-(2^53)` and `+(2^53)` included. In more practical terms, all the integers between -9007199254740992 and 9007199254740992 are perfectly representable. Larger integers, or fractions, are internally represented in exponential form, so it is possible that you get only an approximation of the decimal number, or of the very big integer, that you set as score. -## Sorted sets 101 +Sorted sets 101 +--- -Sorted sets are sorted by their score in an ascending way. 
The same element only -exists a single time, no repeated elements are permitted. The score can be -modified both by `ZADD` that will update the element score, and as a side -effect, its position on the sorted set, and by `ZINCRBY` that can be used in -order to update the score relatively to its previous value. +Sorted sets are sorted by their score in an ascending way. +The same element only exists a single time, no repeated elements are +permitted. The score can be modified both by `ZADD` that will update the +element score, and as a side effect, its position on the sorted set, and +by `ZINCRBY` that can be used in order to update the score relatively to its +previous value. The current score of an element can be retrieved using the `ZSCORE` command, that can also be used to verify if an element already exists or not. @@ -59,44 +48,26 @@ sets][tdtss]. [tdtss]: /topics/data-types#sorted-sets -## Elements with the same score +Elements with the same score +--- -While the same element can't be repeated in a sorted set since every element is -unique, it is possible to add multiple different elements _having the same -score_. When multiple elements have the same score, they are _ordered -lexicographically_ (they are still ordered by score as a first key, however, -locally, all the elements with the same score are relatively ordered -lexicographically). +While the same element can't be repeated in a sorted set since every element +is unique, it is possible to add multiple different elements *having the same score*. When multiple elements have the same score, they are *ordered lexicographically* (they are still ordered by score as a first key, however, locally, all the elements with the same score are relatively ordered lexicographically). -The lexicographic ordering used is binary, it compares strings as array of -bytes. +The lexicographic ordering used is binary, it compares strings as array of bytes. 
-If the user inserts all the elements in a sorted set with the same score (for -example 0), all the elements of the sorted set are sorted lexicographically, and -range queries on elements are possible using the command `ZRANGEBYLEX` (Note: it -is also possible to query sorted sets by range of scores using `ZRANGEBYSCORE`). +If the user inserts all the elements in a sorted set with the same score (for example 0), all the elements of the sorted set are sorted lexicographically, and range queries on elements are possible using the command `ZRANGEBYLEX` (Note: it is also possible to query sorted sets by range of scores using `ZRANGEBYSCORE`). @return @integer-reply, specifically: -- When used without optional arguments, the number of elements added to the - sorted set (excluding score updates). -- If the `CH` option is specified, the number of elements that were changed - (added or updated). +* When used without optional arguments, the number of elements added to the sorted set (excluding score updates). +* If the `CH` option is specified, the number of elements that were changed (added or updated). If the `INCR` option is specified, the return value will be @bulk-string-reply: -- The new score of `member` (a double precision floating point number) - represented as string, or `nil` if the operation was aborted (when called with - either the `XX` or the `NX` option). - -@history - -- `>= 2.4`: Accepts multiple elements. In Redis versions older than 2.4 it was - possible to add or update a single member per call. -- `>= 3.0.2`: Added the `XX`, `NX`, `CH` and `INCR` options. -- `>= 6.2`: Added the `GT` and `LT` options. +* The new score of `member` (a double precision floating point number) represented as string, or `nil` if the operation was aborted (when called with either the `XX` or the `NX` option). 
@examples diff --git a/iredis/data/commands/zcount.md b/iredis/data/commands/zcount.md index 49e6dd6..82ce39b 100644 --- a/iredis/data/commands/zcount.md +++ b/iredis/data/commands/zcount.md @@ -4,9 +4,7 @@ Returns the number of elements in the sorted set at `key` with a score between The `min` and `max` arguments have the same semantic as described for `ZRANGEBYSCORE`. -Note: the command has a complexity of just O(log(N)) because it uses elements -ranks (see `ZRANK`) to get an idea of the range. Because of this there is no -need to do a work proportional to the size of the range. +Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see `ZRANK`) to get an idea of the range. Because of this there is no need to do a work proportional to the size of the range. @return diff --git a/iredis/data/commands/zdiff.md b/iredis/data/commands/zdiff.md index 1b587b9..d9449b7 100644 --- a/iredis/data/commands/zdiff.md +++ b/iredis/data/commands/zdiff.md @@ -3,8 +3,8 @@ sorted set, it is returned to the client. @return -@array-reply: the result of the difference (optionally with their scores, in -case the `WITHSCORES` option is given). +@array-reply: the result of the difference (optionally with their scores, in case +the `WITHSCORES` option is given). @examples diff --git a/iredis/data/commands/zincrby.md b/iredis/data/commands/zincrby.md index 0ac8a89..0b8ccf0 100644 --- a/iredis/data/commands/zincrby.md +++ b/iredis/data/commands/zincrby.md @@ -1,14 +1,15 @@ Increments the score of `member` in the sorted set stored at `key` by -`increment`. If `member` does not exist in the sorted set, it is added with -`increment` as its score (as if its previous score was `0.0`). If `key` does not -exist, a new sorted set with the specified `member` as its sole member is -created. +`increment`. +If `member` does not exist in the sorted set, it is added with `increment` as +its score (as if its previous score was `0.0`). 
+If `key` does not exist, a new sorted set with the specified `member` as its +sole member is created. An error is returned when `key` exists but does not hold a sorted set. The `score` value should be the string representation of a numeric value, and -accepts double precision floating point numbers. It is possible to provide a -negative value to decrement the score. +accepts double precision floating point numbers. +It is possible to provide a negative value to decrement the score. @return diff --git a/iredis/data/commands/zinter.md b/iredis/data/commands/zinter.md index 297c912..5a7adcc 100644 --- a/iredis/data/commands/zinter.md +++ b/iredis/data/commands/zinter.md @@ -5,7 +5,7 @@ For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`. @return -@array-reply: the result of intersection (optionally with their scores, in case +@array-reply: the result of intersection (optionally with their scores, in case the `WITHSCORES` option is given). @examples diff --git a/iredis/data/commands/zintercard.md b/iredis/data/commands/zintercard.md new file mode 100644 index 0000000..613849f --- /dev/null +++ b/iredis/data/commands/zintercard.md @@ -0,0 +1,25 @@ +This command is similar to `ZINTER`, but instead of returning the result set, it returns just the cardinality of the result. + +Keys that do not exist are considered to be empty sets. +With one of the keys being an empty set, the resulting set is also empty (since set intersection with an empty set always results in an empty set). + +By default, the command calculates the cardinality of the intersection of all given sets. +When provided with the optional `LIMIT` argument (which defaults to 0 and means unlimited), if the intersection cardinality reaches limit partway through the computation, the algorithm will exit and yield limit as the cardinality. +Such implementation ensures a significant speedup for queries where the limit is lower than the actual intersection cardinality. 
+ +@return + +@integer-reply: the number of elements in the resulting intersection. + +@examples + +```cli +ZADD zset1 1 "one" +ZADD zset1 2 "two" +ZADD zset2 1 "one" +ZADD zset2 2 "two" +ZADD zset2 3 "three" +ZINTER 2 zset1 zset2 +ZINTERCARD 2 zset1 zset2 +ZINTERCARD 2 zset1 zset2 LIMIT 1 +``` diff --git a/iredis/data/commands/zinterstore.md b/iredis/data/commands/zinterstore.md index e7e71f0..0ecda0d 100644 --- a/iredis/data/commands/zinterstore.md +++ b/iredis/data/commands/zinterstore.md @@ -1,12 +1,13 @@ Computes the intersection of `numkeys` sorted sets given by the specified keys, -and stores the result in `destination`. It is mandatory to provide the number of -input keys (`numkeys`) before passing the input keys and the other (optional) -arguments. +and stores the result in `destination`. +It is mandatory to provide the number of input keys (`numkeys`) before passing +the input keys and the other (optional) arguments. By default, the resulting score of an element is the sum of its scores in the -sorted sets where it exists. Because intersection requires an element to be a -member of every given sorted set, this results in the score of every element in -the resulting sorted set to be equal to the number of input sorted sets. +sorted sets where it exists. +Because intersection requires an element to be a member of every given sorted +set, this results in the score of every element in the resulting sorted set to +be equal to the number of input sorted sets. For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`. 
diff --git a/iredis/data/commands/zlexcount.md b/iredis/data/commands/zlexcount.md index 9aa7092..15484f7 100644 --- a/iredis/data/commands/zlexcount.md +++ b/iredis/data/commands/zlexcount.md @@ -1,13 +1,9 @@ -When all the elements in a sorted set are inserted with the same score, in order -to force lexicographical ordering, this command returns the number of elements -in the sorted set at `key` with a value between `min` and `max`. +When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns the number of elements in the sorted set at `key` with a value between `min` and `max`. The `min` and `max` arguments have the same meaning as described for `ZRANGEBYLEX`. -Note: the command has a complexity of just O(log(N)) because it uses elements -ranks (see `ZRANK`) to get an idea of the range. Because of this there is no -need to do a work proportional to the size of the range. +Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see `ZRANK`) to get an idea of the range. Because of this there is no need to do a work proportional to the size of the range. @return diff --git a/iredis/data/commands/zmpop.md b/iredis/data/commands/zmpop.md new file mode 100644 index 0000000..16848a0 --- /dev/null +++ b/iredis/data/commands/zmpop.md @@ -0,0 +1,36 @@ +Pops one or more elements, that are member-score pairs, from the first non-empty sorted set in the provided list of key names. + +`ZMPOP` and `BZMPOP` are similar to the following, more limited, commands: + +- `ZPOPMIN` or `ZPOPMAX` which take only one key, and can return multiple elements. +- `BZPOPMIN` or `BZPOPMAX` which take multiple keys, but return only one element from just one key. + +See `BZMPOP` for the blocking variant of this command. + +When the `MIN` modifier is used, the elements popped are those with the lowest scores from the first non-empty sorted set. 
The `MAX` modifier causes elements with the highest scores to be popped. +The optional `COUNT` can be used to specify the number of elements to pop, and is set to 1 by default. + +The number of popped elements is the minimum from the sorted set's cardinality and `COUNT`'s value. + +@return + +@array-reply: specifically: + +* A `nil` when no element could be popped. +* A two-element array with the first element being the name of the key from which elements were popped, and the second element is an array of the popped elements. Every entry in the elements array is also an array that contains the member and its score. + +@examples + +```cli +ZMPOP 1 notsuchkey MIN +ZADD myzset 1 "one" 2 "two" 3 "three" +ZMPOP 1 myzset MIN +ZRANGE myzset 0 -1 WITHSCORES +ZMPOP 1 myzset MAX COUNT 10 +ZADD myzset2 4 "four" 5 "five" 6 "six" +ZMPOP 2 myzset myzset2 MIN COUNT 10 +ZRANGE myzset 0 -1 WITHSCORES +ZMPOP 2 myzset myzset2 MAX COUNT 10 +ZRANGE myzset2 0 -1 WITHSCORES +EXISTS myzset myzset2 +``` diff --git a/iredis/data/commands/zmscore.md b/iredis/data/commands/zmscore.md index 69818fb..c2317e9 100644 --- a/iredis/data/commands/zmscore.md +++ b/iredis/data/commands/zmscore.md @@ -1,13 +1,11 @@ -Returns the scores associated with the specified `members` in the sorted set -stored at `key`. +Returns the scores associated with the specified `members` in the sorted set stored at `key`. -For every `member` that does not exist in the sorted set, a `nil` value is -returned. +For every `member` that does not exist in the sorted set, a `nil` value is returned. @return -@array-reply: list of scores or `nil` associated with the specified `member` -values (a double precision floating point number), represented as strings. +@array-reply: list of scores or `nil` associated with the specified `member` values (a double precision floating point number), +represented as strings. 
@examples diff --git a/iredis/data/commands/zpopmax.md b/iredis/data/commands/zpopmax.md index dea7a16..8f6750a 100644 --- a/iredis/data/commands/zpopmax.md +++ b/iredis/data/commands/zpopmax.md @@ -3,8 +3,8 @@ set stored at `key`. When left unspecified, the default value for `count` is 1. Specifying a `count` value that is higher than the sorted set's cardinality will not produce an -error. When returning multiple elements, the one with the highest score will be -the first, followed by the elements with lower scores. +error. When returning multiple elements, the one with the highest score will +be the first, followed by the elements with lower scores. @return diff --git a/iredis/data/commands/zpopmin.md b/iredis/data/commands/zpopmin.md index 789e30c..16f7c97 100644 --- a/iredis/data/commands/zpopmin.md +++ b/iredis/data/commands/zpopmin.md @@ -3,8 +3,8 @@ set stored at `key`. When left unspecified, the default value for `count` is 1. Specifying a `count` value that is higher than the sorted set's cardinality will not produce an -error. When returning multiple elements, the one with the lowest score will be -the first, followed by the elements with greater scores. +error. When returning multiple elements, the one with the lowest score will +be the first, followed by the elements with greater scores. @return diff --git a/iredis/data/commands/zrandmember.md b/iredis/data/commands/zrandmember.md index fba72d3..aae0b25 100644 --- a/iredis/data/commands/zrandmember.md +++ b/iredis/data/commands/zrandmember.md @@ -1,27 +1,19 @@ -When called with just the `key` argument, return a random element from the -sorted set value stored at `key`. +When called with just the `key` argument, return a random element from the sorted set value stored at `key`. -If the provided `count` argument is positive, return an array of **distinct -elements**. The array's length is either `count` or the sorted set's cardinality -(`ZCARD`), whichever is lower. 
+If the provided `count` argument is positive, return an array of **distinct elements**. +The array's length is either `count` or the sorted set's cardinality (`ZCARD`), whichever is lower. -If called with a negative `count`, the behavior changes and the command is -allowed to return the **same element multiple times**. In this case, the number -of returned elements is the absolute value of the specified `count`. +If called with a negative `count`, the behavior changes and the command is allowed to return the **same element multiple times**. +In this case, the number of returned elements is the absolute value of the specified `count`. -The optional `WITHSCORES` modifier changes the reply so it includes the -respective scores of the randomly selected elements from the sorted set. +The optional `WITHSCORES` modifier changes the reply so it includes the respective scores of the randomly selected elements from the sorted set. @return -@bulk-string-reply: without the additional `count` argument, the command returns -a Bulk Reply with the randomly selected element, or `nil` when `key` does not -exist. +@bulk-string-reply: without the additional `count` argument, the command returns a Bulk Reply with the randomly selected element, or `nil` when `key` does not exist. -@array-reply: when the additional `count` argument is passed, the command -returns an array of elements, or an empty array when `key` does not exist. If -the `WITHSCORES` modifier is used, the reply is a list elements and their scores -from the sorted set. +@array-reply: when the additional `count` argument is passed, the command returns an array of elements, or an empty array when `key` does not exist. +If the `WITHSCORES` modifier is used, the reply is a list of elements and their scores from the sorted set. @examples @@ -36,15 +28,12 @@ ZRANDMEMBER dadi -5 WITHSCORES When the `count` argument is a positive value this command behaves as follows: -- No repeated elements are returned.
-- If `count` is bigger than the cardinality of the sorted set, the command will - only return the whole sorted set without additional elements. -- The order of elements in the reply is not truly random, so it is up to the - client to shuffle them if needed. +* No repeated elements are returned. +* If `count` is bigger than the cardinality of the sorted set, the command will only return the whole sorted set without additional elements. +* The order of elements in the reply is not truly random, so it is up to the client to shuffle them if needed. When the `count` is a negative value, the behavior changes as follows: -- Repeating elements are possible. -- Exactly `count` elements, or an empty array if the sorted set is empty - (non-existing key), are always returned. -- The order of elements in the reply is truly random. +* Repeating elements are possible. +* Exactly `count` elements, or an empty array if the sorted set is empty (non-existing key), are always returned. +* The order of elements in the reply is truly random. diff --git a/iredis/data/commands/zrange.md b/iredis/data/commands/zrange.md index f079b61..1a8e421 100644 --- a/iredis/data/commands/zrange.md +++ b/iredis/data/commands/zrange.md @@ -1,68 +1,41 @@ Returns the specified range of elements in the sorted set stored at `<key>`. -`ZRANGE` can perform different types of range queries: by index (rank), by the -score, or by lexicographical order. +`ZRANGE` can perform different types of range queries: by index (rank), by the score, or by lexicographical order. -Starting with Redis 6.2.0, this command can replace the following commands: -`ZREVRANGE`, `ZRANGEBYSCORE`, `ZREVRANGEBYSCORE`, `ZRANGEBYLEX` and -`ZREVRANGEBYLEX`. +Starting with Redis 6.2.0, this command can replace the following commands: `ZREVRANGE`, `ZRANGEBYSCORE`, `ZREVRANGEBYSCORE`, `ZRANGEBYLEX` and `ZREVRANGEBYLEX`. ## Common behavior and options -The order of elements is from the lowest to the highest score. 
Elements with the -same score are ordered lexicographically. +The order of elements is from the lowest to the highest score. Elements with the same score are ordered lexicographically. -The optional `REV` argument reverses the ordering, so elements are ordered from -highest to lowest score, and score ties are resolved by reverse lexicographical -ordering. +The optional `REV` argument reverses the ordering, so elements are ordered from highest to lowest score, and score ties are resolved by reverse lexicographical ordering. -The optional `LIMIT` argument can be used to obtain a sub-range from the -matching elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative -`<count>` returns all elements from the `<offset>`. Keep in mind that if -`<offset>` is large, the sorted set needs to be traversed for `<offset>` -elements before getting to the elements to return, which can add up to O(N) time -complexity. +The optional `LIMIT` argument can be used to obtain a sub-range from the matching elements (similar to _SELECT LIMIT offset, count_ in SQL). +A negative `<count>` returns all elements from the `<offset>`. Keep in mind that if `<offset>` is large, the sorted set needs to be traversed for `<offset>` elements before getting to the elements to return, which can add up to O(N) time complexity. -The optional `WITHSCORES` argument supplements the command's reply with the -scores of elements returned. The returned list contains -`value1,score1,...,valueN,scoreN` instead of `value1,...,valueN`. Client -libraries are free to return a more appropriate data type (suggestion: an array -with (value, score) arrays/tuples). +The optional `WITHSCORES` argument supplements the command's reply with the scores of elements returned. The returned list contains `value1,score1,...,valueN,scoreN` instead of `value1,...,valueN`. Client libraries are free to return a more appropriate data type (suggestion: an array with (value, score) arrays/tuples). 
## Index ranges -By default, the command performs an index range query. The `<min>` and `<max>` -arguments represent zero-based indexes, where `0` is the first element, `1` is -the next element, and so on. These arguments specify an **inclusive range**, so -for example, `ZRANGE myzset 0 1` will return both the first and the second -element of the sorted set. +By default, the command performs an index range query. The `<start>` and `<stop>` arguments represent zero-based indexes, where `0` is the first element, `1` is the next element, and so on. These arguments specify an **inclusive range**, so for example, `ZRANGE myzset 0 1` will return both the first and the second element of the sorted set. -The indexes can also be negative numbers indicating offsets from the end of the -sorted set, with `-1` being the last element of the sorted set, `-2` the -penultimate element, and so on. +The indexes can also be negative numbers indicating offsets from the end of the sorted set, with `-1` being the last element of the sorted set, `-2` the penultimate element, and so on. Out of range indexes do not produce an error. -If `<min>` is greater than either the end index of the sorted set or `<max>`, an -empty list is returned. +If `<start>` is greater than either the end index of the sorted set or `<stop>`, an empty list is returned. -If `<max>` is greater than the end index of the sorted set, Redis will use the -last element of the sorted set. +If `<stop>` is greater than the end index of the sorted set, Redis will use the last element of the sorted set. ## Score ranges -When the `BYSCORE` option is provided, the command behaves like `ZRANGEBYSCORE` -and returns the range of elements from the sorted set having scores equal or -between `<min>` and `<max>`. +When the `BYSCORE` option is provided, the command behaves like `ZRANGEBYSCORE` and returns the range of elements from the sorted set having scores equal or between `<start>` and `<stop>`. 
-`<min>` and `<max>` can be `-inf` and `+inf`, denoting the negative and positive -infinities, respectively. This means that you are not required to know the -highest or lowest score in the sorted set to get all elements from or up to a -certain score. +`<start>` and `<stop>` can be `-inf` and `+inf`, denoting the negative and positive infinities, respectively. This means that you are not required to know the highest or lowest score in the sorted set to get all elements from or up to a certain score. -By default, the score intervals specified by `<min>` and `<max>` are closed -(inclusive). It is possible to specify an open interval (exclusive) by prefixing -the score with the character `(`. +By default, the score intervals specified by `<start>` and `<stop>` are closed (inclusive). +It is possible to specify an open interval (exclusive) by prefixing the score +with the character `(`. For example: @@ -78,59 +51,59 @@ ZRANGE zset (5 (10 BYSCORE Will return all the elements with `5 < score < 10` (5 and 10 excluded). +## Reverse ranges + +Using the `REV` option reverses the sorted set, with index 0 as the element with the highest score. + +By default, `<start>` must be less than or equal to `<stop>` to return anything. +However, if the `BYSCORE`, or `BYLEX` options are selected, the `<start>` is the highest score to consider, and `<stop>` is the lowest score to consider, therefore `<start>` must be greater than or equal to `<stop>` in order to return anything. + +For example: + +``` +ZRANGE zset 5 10 REV +``` + +Will return the elements between index 5 and 10 in the reversed index. + +``` +ZRANGE zset 10 5 REV BYSCORE +``` + +Will return all elements with scores less than 10 and greater than 5. + ## Lexicographical ranges -When the `BYLEX` option is used, the command behaves like `ZRANGEBYLEX` and -returns the range of elements from the sorted set between the `<min>` and -`<max>` lexicographical closed range intervals. 
+When the `BYLEX` option is used, the command behaves like `ZRANGEBYLEX` and returns the range of elements from the sorted set between the `<start>` and `<stop>` lexicographical closed range intervals. -Note that lexicographical ordering relies on all elements having the same score. -The reply is unspecified when the elements have different scores. +Note that lexicographical ordering relies on all elements having the same score. The reply is unspecified when the elements have different scores. -Valid `<min>` and `<max>` must start with `(` or `[`, in order to specify +Valid `<start>` and `<stop>` must start with `(` or `[`, in order to specify whether the range interval is exclusive or inclusive, respectively. -The special values of `+` or `-` `<min>` and `<max>` mean positive and negative -infinite strings, respectively, so for instance the command **ZRANGEBYLEX -myzset - +** is guaranteed to return all the elements in the sorted set, -providing that all the elements have the same score. +The special values of `+` or `-` for `<start>` and `<stop>` mean positive and negative infinite strings, respectively, so for instance the command `ZRANGE myzset - + BYLEX` is guaranteed to return all the elements in the sorted set, providing that all the elements have the same score. + +The `REV` option reverses the order of the `<start>` and `<stop>` elements, where `<start>` must be lexicographically greater than `<stop>` to produce a non-empty result. ### Lexicographical comparison of strings -Strings are compared as a binary array of bytes. Because of how the ASCII -character set is specified, this means that usually this also have the effect of -comparing normal ASCII characters in an obvious dictionary way. However, this is -not true if non-plain ASCII strings are used (for example, utf8 strings).
Because of how the ASCII character set is specified, this means that usually this also have the effect of comparing normal ASCII characters in an obvious dictionary way. However, this is not true if non-plain ASCII strings are used (for example, utf8 strings). -However, the user can apply a transformation to the encoded string so that the -first part of the element inserted in the sorted set will compare as the user -requires for the specific application. For example, if I want to add strings -that will be compared in a case-insensitive way, but I still want to retrieve -the real case when querying, I can add strings in the following way: +However, the user can apply a transformation to the encoded string so that the first part of the element inserted in the sorted set will compare as the user requires for the specific application. For example, if I want to +add strings that will be compared in a case-insensitive way, but I still +want to retrieve the real case when querying, I can add strings in the +following way: ZADD autocomplete 0 foo:Foo 0 bar:BAR 0 zap:zap -Because of the first _normalized_ part in every element (before the colon -character), we are forcing a given comparison. However, after the range is -queried using `ZRANGE ... BYLEX`, the application can display to the user the -second part of the string, after the colon. +Because of the first *normalized* part in every element (before the colon character), we are forcing a given comparison. However, after the range is queried using `ZRANGE ... BYLEX`, the application can display to the user the second part of the string, after the colon. -The binary nature of the comparison allows to use sorted sets as a general -purpose index, for example, the first part of the element can be a 64-bit -big-endian number. Since big-endian numbers have the most significant bytes in -the initial positions, the binary comparison will match the numerical comparison -of the numbers. 
This can be used in order to implement range queries on 64-bit -values. As in the example below, after the first 8 bytes, we can store the value -of the element we are indexing. +The binary nature of the comparison allows to use sorted sets as a general purpose index, for example, the first part of the element can be a 64-bit big-endian number. Since big-endian numbers have the most significant bytes in the initial positions, the binary comparison will match the numerical comparison of the numbers. This can be used in order to implement range queries on 64-bit values. As in the example below, after the first 8 bytes, we can store the value of the element we are indexing. @return -@array-reply: list of elements in the specified range (optionally with their -scores, in case the `WITHSCORES` option is given). - -@history - -- `>= 6.2`: Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options. +@array-reply: list of elements in the specified range (optionally with +their scores, in case the `WITHSCORES` option is given). @examples @@ -143,17 +116,14 @@ ZRANGE myzset 2 3 ZRANGE myzset -2 -1 ``` -The following example using `WITHSCORES` shows how the command returns always an -array, but this time, populated with _element_1_, _score_1_, _element_2_, -_score_2_, ..., _element_N_, _score_N_. +The following example using `WITHSCORES` shows how the command returns always an array, but this time, populated with *element_1*, *score_1*, *element_2*, *score_2*, ..., *element_N*, *score_N*. ```cli ZRANGE myzset 0 1 WITHSCORES ``` -This example shows how to query the sorted set by score, excluding the value `1` -and up to infinity, returning only the second element of the result: +This example shows how to query the sorted set by score, excluding the value `1` and up to infinity, returning only the second element of the result: ```cli ZRANGE myzset (1 +inf BYSCORE LIMIT 1 1 -``` +```
\ No newline at end of file diff --git a/iredis/data/commands/zrangebylex.md b/iredis/data/commands/zrangebylex.md index 55f3002..4eefffc 100644 --- a/iredis/data/commands/zrangebylex.md +++ b/iredis/data/commands/zrangebylex.md @@ -1,59 +1,51 @@ -When all the elements in a sorted set are inserted with the same score, in order -to force lexicographical ordering, this command returns all the elements in the -sorted set at `key` with a value between `min` and `max`. +When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at `key` with a value between `min` and `max`. -If the elements in the sorted set have different scores, the returned elements -are unspecified. +If the elements in the sorted set have different scores, the returned elements are unspecified. -The elements are considered to be ordered from lower to higher strings as -compared byte-by-byte using the `memcmp()` C function. Longer strings are -considered greater than shorter strings if the common part is identical. - -As per Redis 6.2.0, this command is considered deprecated. Please prefer using -the `ZRANGE` command with the `BYLEX` argument in new code. +The elements are considered to be ordered from lower to higher strings as compared byte-by-byte using the `memcmp()` C function. Longer strings are considered greater than shorter strings if the common part is identical. The optional `LIMIT` argument can be used to only get a range of the matching elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative `count` -returns all elements from the `offset`. Keep in mind that if `offset` is large, -the sorted set needs to be traversed for `offset` elements before getting to the -elements to return, which can add up to O(N) time complexity. +returns all elements from the `offset`. 
+Keep in mind that if `offset` is large, the sorted set needs to be traversed for +`offset` elements before getting to the elements to return, which can add up to +O(N) time complexity. ## How to specify intervals -Valid _start_ and _stop_ must start with `(` or `[`, in order to specify if the -range item is respectively exclusive or inclusive. The special values of `+` or -`-` for _start_ and _stop_ have the special meaning or positively infinite and -negatively infinite strings, so for instance the command **ZRANGEBYLEX -myzset - +** is guaranteed to return all the elements in the sorted set, if all -the elements have the same score. +Valid *start* and *stop* must start with `(` or `[`, in order to specify +if the range item is respectively exclusive or inclusive. +The special values of `+` or `-` for *start* and *stop* have the special +meaning or positively infinite and negatively infinite strings, so for +instance the command **ZRANGEBYLEX myzset - +** is guaranteed to return +all the elements in the sorted set, if all the elements have the same +score. ## Details on strings comparison -Strings are compared as binary array of bytes. Because of how the ASCII -character set is specified, this means that usually this also have the effect of -comparing normal ASCII characters in an obvious dictionary way. However this is -not true if non plain ASCII strings are used (for example utf8 strings). +Strings are compared as binary array of bytes. Because of how the ASCII character +set is specified, this means that usually this also have the effect of comparing +normal ASCII characters in an obvious dictionary way. However this is not true +if non plain ASCII strings are used (for example utf8 strings). -However the user can apply a transformation to the encoded string so that the -first part of the element inserted in the sorted set will compare as the user -requires for the specific application. 
For example if I want to add strings that -will be compared in a case-insensitive way, but I still want to retrieve the -real case when querying, I can add strings in the following way: +However the user can apply a transformation to the encoded string so that +the first part of the element inserted in the sorted set will compare as the +user requires for the specific application. For example if I want to +add strings that will be compared in a case-insensitive way, but I still +want to retrieve the real case when querying, I can add strings in the +following way: ZADD autocomplete 0 foo:Foo 0 bar:BAR 0 zap:zap -Because of the first _normalized_ part in every element (before the colon -character), we are forcing a given comparison, however after the range is -queries using `ZRANGEBYLEX` the application can display to the user the second -part of the string, after the colon. +Because of the first *normalized* part in every element (before the colon character), we are forcing a given comparison, however after the range is queries using `ZRANGEBYLEX` the application can display to the user the second part of the string, after the colon. The binary nature of the comparison allows to use sorted sets as a general -purpose index, for example the first part of the element can be a 64 bit big -endian number: since big endian numbers have the most significant bytes in the -initial positions, the binary comparison will match the numerical comparison of -the numbers. This can be used in order to implement range queries on 64 bit -values. As in the example below, after the first 8 bytes we can store the value -of the element we are actually indexing. +purpose index, for example the first part of the element can be a 64 bit +big endian number: since big endian numbers have the most significant bytes +in the initial positions, the binary comparison will match the numerical +comparison of the numbers. This can be used in order to implement range +queries on 64 bit values. 
As in the example below, after the first 8 bytes +we can store the value of the element we are actually indexing. @return diff --git a/iredis/data/commands/zrangebyscore.md b/iredis/data/commands/zrangebyscore.md index e102eed..bc81708 100644 --- a/iredis/data/commands/zrangebyscore.md +++ b/iredis/data/commands/zrangebyscore.md @@ -1,23 +1,21 @@ Returns all the elements in the sorted set at `key` with a score between `min` -and `max` (including elements with score equal to `min` or `max`). The elements -are considered to be ordered from low to high scores. +and `max` (including elements with score equal to `min` or `max`). +The elements are considered to be ordered from low to high scores. The elements having the same score are returned in lexicographical order (this follows from a property of the sorted set implementation in Redis and does not involve further computation). -As per Redis 6.2.0, this command is considered deprecated. Please prefer using -the `ZRANGE` command with the `BYSCORE` argument in new code. - The optional `LIMIT` argument can be used to only get a range of the matching elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative `count` -returns all elements from the `offset`. Keep in mind that if `offset` is large, -the sorted set needs to be traversed for `offset` elements before getting to the -elements to return, which can add up to O(N) time complexity. +returns all elements from the `offset`. +Keep in mind that if `offset` is large, the sorted set needs to be traversed for +`offset` elements before getting to the elements to return, which can add up to +O(N) time complexity. The optional `WITHSCORES` argument makes the command return both the element and -its score, instead of the element alone. This option is available since Redis -2.0. +its score, instead of the element alone. +This option is available since Redis 2.0. ## Exclusive intervals and infinity @@ -25,9 +23,10 @@ its score, instead of the element alone. 
This option is available since Redis the highest or lowest score in the sorted set to get all elements from or up to a certain score. -By default, the interval specified by `min` and `max` is closed (inclusive). It -is possible to specify an open interval (exclusive) by prefixing the score with -the character `(`. For example: +By default, the interval specified by `min` and `max` is closed (inclusive). +It is possible to specify an open interval (exclusive) by prefixing the score +with the character `(`. +For example: ``` ZRANGEBYSCORE zset (1 5 @@ -43,8 +42,8 @@ Will return all the elements with `5 < score < 10` (5 and 10 excluded). @return -@array-reply: list of elements in the specified score range (optionally with -their scores). +@array-reply: list of elements in the specified score range (optionally +with their scores). @examples @@ -60,18 +59,18 @@ ZRANGEBYSCORE myzset (1 (2 ## Pattern: weighted random selection of an element -Normally `ZRANGEBYSCORE` is simply used in order to get range of items where the -score is the indexed integer key, however it is possible to do less obvious -things with the command. +Normally `ZRANGEBYSCORE` is simply used in order to get range of items +where the score is the indexed integer key, however it is possible to do less +obvious things with the command. -For example a common problem when implementing Markov chains and other -algorithms is to select an element at random from a set, but different elements -may have different weights that change how likely it is they are picked. +For example a common problem when implementing Markov chains and other algorithms +is to select an element at random from a set, but different elements may have +different weights that change how likely it is they are picked. This is how we use this command in order to mount such an algorithm: -Imagine you have elements A, B and C with weights 1, 2 and 3. 
You compute the -sum of the weights, which is 1+2+3 = 6 +Imagine you have elements A, B and C with weights 1, 2 and 3. +You compute the sum of the weights, which is 1+2+3 = 6 At this point you add all the elements into a sorted set using this algorithm: @@ -92,12 +91,12 @@ B to score .5 C to score 1 ``` -Since this involves approximations, in order to avoid C is set to, like, 0.998 -instead of 1, we just modify the above algorithm to make sure the last score is -1 (left as an exercise for the reader...). +Since this involves approximations, in order to avoid C is set to, +like, 0.998 instead of 1, we just modify the above algorithm to make sure +the last score is 1 (left as an exercise for the reader...). -At this point, each time you want to get a weighted random element, just compute -a random number between 0 and 1 (which is like calling `rand()` in most -languages), so you can just do: +At this point, each time you want to get a weighted random element, +just compute a random number between 0 and 1 (which is like calling +`rand()` in most languages), so you can just do: RANDOM_ELE = ZRANGEBYSCORE key RAND() +inf LIMIT 0 1 diff --git a/iredis/data/commands/zrangestore.md b/iredis/data/commands/zrangestore.md index ba2b805..8dc744c 100644 --- a/iredis/data/commands/zrangestore.md +++ b/iredis/data/commands/zrangestore.md @@ -1,5 +1,4 @@ -This command is like `ZRANGE`, but stores the result in the `<dst>` destination -key. +This command is like `ZRANGE`, but stores the result in the `<dst>` destination key. @return diff --git a/iredis/data/commands/zrank.md b/iredis/data/commands/zrank.md index 62520d9..1419adf 100644 --- a/iredis/data/commands/zrank.md +++ b/iredis/data/commands/zrank.md @@ -1,14 +1,15 @@ Returns the rank of `member` in the sorted set stored at `key`, with the scores -ordered from low to high. The rank (or index) is 0-based, which means that the -member with the lowest score has rank `0`. +ordered from low to high. 
+The rank (or index) is 0-based, which means that the member with the lowest +score has rank `0`. Use `ZREVRANK` to get the rank of an element with the scores ordered from high to low. @return -- If `member` exists in the sorted set, @integer-reply: the rank of `member`. -- If `member` does not exist in the sorted set or `key` does not exist, +* If `member` exists in the sorted set, @integer-reply: the rank of `member`. +* If `member` does not exist in the sorted set or `key` does not exist, @bulk-string-reply: `nil`. @examples diff --git a/iredis/data/commands/zrem.md b/iredis/data/commands/zrem.md index 0ce5061..d97fd4b 100644 --- a/iredis/data/commands/zrem.md +++ b/iredis/data/commands/zrem.md @@ -1,5 +1,5 @@ -Removes the specified members from the sorted set stored at `key`. Non existing -members are ignored. +Removes the specified members from the sorted set stored at `key`. +Non existing members are ignored. An error is returned when `key` exists and does not hold a sorted set. @@ -7,14 +7,9 @@ An error is returned when `key` exists and does not hold a sorted set. @integer-reply, specifically: -- The number of members removed from the sorted set, not including non existing +* The number of members removed from the sorted set, not including non existing members. -@history - -- `>= 2.4`: Accepts multiple elements. In Redis versions older than 2.4 it was - possible to remove a single member per call. - @examples ```cli diff --git a/iredis/data/commands/zremrangebylex.md b/iredis/data/commands/zremrangebylex.md index cc2cf30..4264f1b 100644 --- a/iredis/data/commands/zremrangebylex.md +++ b/iredis/data/commands/zremrangebylex.md @@ -1,11 +1,6 @@ -When all the elements in a sorted set are inserted with the same score, in order -to force lexicographical ordering, this command removes all elements in the -sorted set stored at `key` between the lexicographical range specified by `min` -and `max`. 
+When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command removes all elements in the sorted set stored at `key` between the lexicographical range specified by `min` and `max`. -The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command. -Similarly, this command actually removes the same elements that `ZRANGEBYLEX` -would return if called with the same `min` and `max` arguments. +The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command. Similarly, this command actually removes the same elements that `ZRANGEBYLEX` would return if called with the same `min` and `max` arguments. @return diff --git a/iredis/data/commands/zremrangebyrank.md b/iredis/data/commands/zremrangebyrank.md index 4de25f9..edd3cf3 100644 --- a/iredis/data/commands/zremrangebyrank.md +++ b/iredis/data/commands/zremrangebyrank.md @@ -1,9 +1,11 @@ Removes all elements in the sorted set stored at `key` with rank between `start` -and `stop`. Both `start` and `stop` are `0` -based indexes with `0` being the -element with the lowest score. These indexes can be negative numbers, where they -indicate offsets starting at the element with the highest score. For example: -`-1` is the element with the highest score, `-2` the element with the second -highest score and so forth. +and `stop`. +Both `start` and `stop` are `0` -based indexes with `0` being the element with +the lowest score. +These indexes can be negative numbers, where they indicate offsets starting at +the element with the highest score. +For example: `-1` is the element with the highest score, `-2` the element with +the second highest score and so forth. 
@return diff --git a/iredis/data/commands/zremrangebyscore.md b/iredis/data/commands/zremrangebyscore.md index 3665bd0..fdf9a98 100644 --- a/iredis/data/commands/zremrangebyscore.md +++ b/iredis/data/commands/zremrangebyscore.md @@ -1,9 +1,6 @@ Removes all elements in the sorted set stored at `key` with a score between `min` and `max` (inclusive). -Since version 2.1.6, `min` and `max` can be exclusive, following the syntax of -`ZRANGEBYSCORE`. - @return @integer-reply: the number of elements removed. diff --git a/iredis/data/commands/zrevrange.md b/iredis/data/commands/zrevrange.md index a7a667c..3a19810 100644 --- a/iredis/data/commands/zrevrange.md +++ b/iredis/data/commands/zrevrange.md @@ -1,16 +1,13 @@ -Returns the specified range of elements in the sorted set stored at `key`. The -elements are considered to be ordered from the highest to the lowest score. +Returns the specified range of elements in the sorted set stored at `key`. +The elements are considered to be ordered from the highest to the lowest score. Descending lexicographical order is used for elements with equal score. Apart from the reversed ordering, `ZREVRANGE` is similar to `ZRANGE`. -As per Redis 6.2.0, this command is considered deprecated. Please prefer using -the `ZRANGE` command with the `REV` argument in new code. - @return -@array-reply: list of elements in the specified range (optionally with their -scores). +@array-reply: list of elements in the specified range (optionally with +their scores). @examples diff --git a/iredis/data/commands/zrevrangebylex.md b/iredis/data/commands/zrevrangebylex.md index 1cd9de0..c6772c9 100644 --- a/iredis/data/commands/zrevrangebylex.md +++ b/iredis/data/commands/zrevrangebylex.md @@ -1,12 +1,7 @@ -When all the elements in a sorted set are inserted with the same score, in order -to force lexicographical ordering, this command returns all the elements in the -sorted set at `key` with a value between `max` and `min`. 
+When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at `key` with a value between `max` and `min`. Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to `ZRANGEBYLEX`. -As per Redis 6.2.0, this command is considered deprecated. Please prefer using -the `ZRANGE` command with the `BYLEX` and `REV` arguments in new code. - @return @array-reply: list of elements in the specified score range. diff --git a/iredis/data/commands/zrevrangebyscore.md b/iredis/data/commands/zrevrangebyscore.md index d41652c..e95d771 100644 --- a/iredis/data/commands/zrevrangebyscore.md +++ b/iredis/data/commands/zrevrangebyscore.md @@ -1,7 +1,7 @@ Returns all the elements in the sorted set at `key` with a score between `max` -and `min` (including elements with score equal to `max` or `min`). In contrary -to the default ordering of sorted sets, for this command the elements are -considered to be ordered from high to low scores. +and `min` (including elements with score equal to `max` or `min`). +In contrary to the default ordering of sorted sets, for this command the +elements are considered to be ordered from high to low scores. The elements having the same score are returned in reverse lexicographical order. @@ -9,13 +9,10 @@ order. Apart from the reversed ordering, `ZREVRANGEBYSCORE` is similar to `ZRANGEBYSCORE`. -As per Redis 6.2.0, this command is considered deprecated. Please prefer using -the `ZRANGE` command with the `BYSCORE` and `REV` arguments in new code. - @return -@array-reply: list of elements in the specified score range (optionally with -their scores). +@array-reply: list of elements in the specified score range (optionally +with their scores). 
@examples diff --git a/iredis/data/commands/zrevrank.md b/iredis/data/commands/zrevrank.md index e85c80c..6c64d98 100644 --- a/iredis/data/commands/zrevrank.md +++ b/iredis/data/commands/zrevrank.md @@ -1,14 +1,15 @@ Returns the rank of `member` in the sorted set stored at `key`, with the scores -ordered from high to low. The rank (or index) is 0-based, which means that the -member with the highest score has rank `0`. +ordered from high to low. +The rank (or index) is 0-based, which means that the member with the highest +score has rank `0`. Use `ZRANK` to get the rank of an element with the scores ordered from low to high. @return -- If `member` exists in the sorted set, @integer-reply: the rank of `member`. -- If `member` does not exist in the sorted set or `key` does not exist, +* If `member` exists in the sorted set, @integer-reply: the rank of `member`. +* If `member` does not exist in the sorted set or `key` does not exist, @bulk-string-reply: `nil`. @examples diff --git a/iredis/data/commands/zscore.md b/iredis/data/commands/zscore.md index f3204d0..8b1e74d 100644 --- a/iredis/data/commands/zscore.md +++ b/iredis/data/commands/zscore.md @@ -5,8 +5,8 @@ returned. @return -@bulk-string-reply: the score of `member` (a double precision floating point -number), represented as string. +@bulk-string-reply: the score of `member` (a double precision floating point number), +represented as string. @examples diff --git a/iredis/data/commands/zunion.md b/iredis/data/commands/zunion.md index 71f737b..d77d81f 100644 --- a/iredis/data/commands/zunion.md +++ b/iredis/data/commands/zunion.md @@ -5,8 +5,8 @@ For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`. @return -@array-reply: the result of union (optionally with their scores, in case the -`WITHSCORES` option is given). +@array-reply: the result of union (optionally with their scores, in case +the `WITHSCORES` option is given). 
@examples diff --git a/iredis/data/commands/zunionstore.md b/iredis/data/commands/zunionstore.md index 45b4b3b..49e2d50 100644 --- a/iredis/data/commands/zunionstore.md +++ b/iredis/data/commands/zunionstore.md @@ -1,22 +1,23 @@ Computes the union of `numkeys` sorted sets given by the specified keys, and -stores the result in `destination`. It is mandatory to provide the number of -input keys (`numkeys`) before passing the input keys and the other (optional) -arguments. +stores the result in `destination`. +It is mandatory to provide the number of input keys (`numkeys`) before passing +the input keys and the other (optional) arguments. By default, the resulting score of an element is the sum of its scores in the sorted sets where it exists. Using the `WEIGHTS` option, it is possible to specify a multiplication factor -for each input sorted set. This means that the score of every element in every -input sorted set is multiplied by this factor before being passed to the -aggregation function. When `WEIGHTS` is not given, the multiplication factors -default to `1`. +for each input sorted set. +This means that the score of every element in every input sorted set is +multiplied by this factor before being passed to the aggregation function. +When `WEIGHTS` is not given, the multiplication factors default to `1`. With the `AGGREGATE` option, it is possible to specify how the results of the -union are aggregated. This option defaults to `SUM`, where the score of an -element is summed across the inputs where it exists. When this option is set to -either `MIN` or `MAX`, the resulting set will contain the minimum or maximum -score of an element across the inputs where it exists. +union are aggregated. +This option defaults to `SUM`, where the score of an element is summed across +the inputs where it exists. +When this option is set to either `MIN` or `MAX`, the resulting set will contain +the minimum or maximum score of an element across the inputs where it exists. 
If `destination` already exists, it is overwritten. diff --git a/iredis/entry.py b/iredis/entry.py index 9a07956..c7ae76c 100644 --- a/iredis/entry.py +++ b/iredis/entry.py @@ -237,6 +237,7 @@ Use Redis URL to indicate connection(Can set with env `IREDIS_URL`), Example: """ SHELL = """Allow to run shell commands, default to True.""" PAGER_HELP = """Using pager when output is too tall for your window, default to True.""" +VERIFY_SSL_HELP = """Set the TLS certificate verification strategy""" # command line entry here... @@ -274,6 +275,12 @@ PAGER_HELP = """Using pager when output is too tall for your window, default to @click.option("--shell/--no-shell", default=None, is_flag=True, help=SHELL) @click.option("--pager/--no-pager", default=None, is_flag=True, help=PAGER_HELP) @click.option( + "--verify-ssl", + default=None, + type=click.Choice(["none", "optional", "required"]), + help=VERIFY_SSL_HELP, +) +@click.option( "--prompt", default=None, help=( @@ -302,6 +309,7 @@ def gather_args( socket, shell, pager, + verify_ssl, prompt, ): """ @@ -344,6 +352,8 @@ def gather_args( config.shell = shell if pager is not None: config.enable_pager = pager + if verify_ssl is not None: + config.verify_ssl = verify_ssl return ctx @@ -384,6 +394,7 @@ def create_client(params): password = params["password"] client_name = params["client_name"] prompt = params["prompt"] + verify_ssl = params["verify_ssl"] dsn_from_url = None dsn = params["dsn"] @@ -395,6 +406,7 @@ def create_client(params): if dsn_from_url: # db from command lint options should be high priority db = db if db else dsn_from_url.db + verify_ssl = verify_ssl or dsn_from_url.verify_ssl return Client( host=dsn_from_url.host, port=dsn_from_url.port, @@ -405,6 +417,7 @@ def create_client(params): username=dsn_from_url.username, client_name=client_name, prompt=prompt, + verify_ssl=verify_ssl, ) if params["socket"]: return Client( @@ -424,6 +437,7 @@ def create_client(params): password=password, client_name=client_name, 
prompt=prompt, + verify_ssl=verify_ssl, ) diff --git a/iredis/redis_grammar.py b/iredis/redis_grammar.py index b9e2ea8..391cdd8 100644 --- a/iredis/redis_grammar.py +++ b/iredis/redis_grammar.py @@ -72,7 +72,7 @@ CONST = { "stream_groups": "GROUPS", "stream_group": "GROUP", "maxlen": "MAXLEN", - "idel": "IDEL", + "idle": "IDLE", "time": "TIME", "retrycount": "RETRYCOUNT", "force": "FORCE", @@ -317,7 +317,7 @@ STREAM_DESTROY = rf"(?P<stream_destroy>{c('stream_destroy')})" STREAM_DELCONSUMER = rf"(?P<stream_delconsumer>{c('stream_delconsumer')})" MAXLEN = rf"(?P<maxlen>{c('maxlen')})" APPROXIMATELY = r"(?P<approximately>~)" -IDEL = rf"(?P<idel>{c('idel')})" +IDLE = rf"(?P<idle>{c('idle')})" TIME = rf"(?P<time>{c('time')})" RETRYCOUNT = rf"(?P<retrycount>{c('retrycount')})" FORCE = rf"(?P<force>{c('force')})" @@ -565,7 +565,7 @@ GRAMMAR = { "command_xclaim": rf""" \s+ {KEY} \s+ {GROUP} \s+ {CONSUMER} \s+ {MILLISECOND} (\s+ {STREAM_ID})+ - (\s+ {IDEL} \s+ {MILLISECOND})? + (\s+ {IDLE} \s+ {MILLISECOND})? (\s+ {TIME} \s+ {TIMESTAMP})? (\s+ {RETRYCOUNT} \s+ {COUNT})? (\s+ {FORCE})? 
diff --git a/iredis/utils.py b/iredis/utils.py index 593241b..5a61425 100644 --- a/iredis/utils.py +++ b/iredis/utils.py @@ -9,7 +9,6 @@ from prompt_toolkit.formatted_text import FormattedText from iredis.exceptions import InvalidArguments - logger = logging.getLogger(__name__) _last_timer = time.time() @@ -261,7 +260,7 @@ def convert_formatted_text_to_bytes(formatted_text): return "".join(to_render).encode() -DSN = namedtuple("DSN", "scheme host port path db username password") +DSN = namedtuple("DSN", "scheme host port path db username password verify_ssl") def parse_url(url, db=0): @@ -271,7 +270,7 @@ def parse_url(url, db=0): For example:: redis://[[username]:[password]]@localhost:6379/0 - rediss://[[username]:[password]]@localhost:6379/0 + rediss://[[username]:[password]]@localhost:6379/0?ssl_cert_reqs=none unix://[[username]:[password]]@/path/to/socket.sock?db=0 Three URL schemes are supported: @@ -297,6 +296,7 @@ def parse_url(url, db=0): scheme = url.scheme path = unquote(url.path) if url.path else None + verify_ssl = None # We only support redis://, rediss:// and unix:// schemes. 
# if scheme is ``unix``, read ``db`` from query string # otherwise read ``db`` from path @@ -312,6 +312,13 @@ def parse_url(url, db=0): path = None except (AttributeError, ValueError): pass + qs = parse_qs(url.query) + if "ssl_cert_reqs" in qs: + verify_ssl = qs["ssl_cert_reqs"][0] + if verify_ssl not in ["none", "optional", "required"]: + raise ValueError( + f"ssl_cert_reqs must be one of 'none', 'optional', 'required' or must be omitted: {verify_ssl}" + ) else: valid_schemes = ", ".join(("redis://", "rediss://", "unix://")) raise ValueError( @@ -323,4 +330,4 @@ def parse_url(url, db=0): hostname = unquote(url.hostname) if url.hostname else None port = url.port - return DSN(scheme, hostname, port, path, db, username, password) + return DSN(scheme, hostname, port, path, db, username, password, verify_ssl) diff --git a/poetry.lock b/poetry.lock index f06eaf5..ab14deb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,24 +1,29 @@ [[package]] -name = "atomicwrites" -version = "1.4.0" -description = "Atomic file writes." 
-category = "dev" +name = "async-timeout" +version = "4.0.2" +description = "Timeout context manager for asyncio programs" +category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" + +[package.dependencies] +typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""} [[package]] name = "attrs" -version = "21.4.0" +version = "22.2.0" description = "Classes Without Boilerplate" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] +cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] +tests = ["attrs[tests-no-zope]", "zope.interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=0.971,<0.990)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests_no_zope = ["cloudpickle", "hypothesis", "mypy (>=0.971,<0.990)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "click" @@ -30,11 +35,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "colorama" -version = "0.4.5" +version = "0.4.6" description = "Cross-platform 
colored terminal text." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" [[package]] name = "configobj" @@ -48,36 +53,47 @@ python-versions = "*" six = "*" [[package]] +name = "exceptiongroup" +version = "1.1.0" +description = "Backport of PEP 654 (exception groups)" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] name = "importlib-metadata" -version = "4.8.3" +version = "5.2.0" description = "Read metadata from Python packages" -category = "dev" +category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "importlib-resources" -version = "5.4.0" +version = "5.10.2" description = "Read resources from Python packages" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] -docs = ["sphinx", 
"jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] name = "iniconfig" @@ -89,7 +105,7 @@ python-versions = "*" [[package]] name = "mistune" -version = "2.0.3" +version = "2.0.4" description = "A sane Markdown parser with useful plugins and renderers" category = "main" optional = false @@ -146,11 +162,11 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "prompt-toolkit" -version = "3.0.3" +version = "3.0.36" description = "Library for building powerful interactive command lines in Python" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.2" [package.dependencies] wcwidth = "*" @@ -164,53 +180,47 @@ optional = false python-versions = "*" [[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - -[[package]] -name = "pygments" -version = "2.12.0" +name = "Pygments" +version = "2.13.0" description = "Pygments is a syntax highlighting package written in Python." 
category = "main" optional = false python-versions = ">=3.6" +[package.extras] +plugins = ["importlib-metadata"] + [[package]] name = "pyparsing" -version = "3.0.7" -description = "Python parsing module" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.8" [package.extras] diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "6.2.5" +version = "7.2.0" description = "pytest: simple powerful testing with Python" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" -py = ">=1.8.2" -toml = "*" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] [[package]] name = "python-dateutil" @@ -233,14 +243,20 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "redis" -version = "3.5.3" -description = "Python client for Redis key-value store" +version = "4.4.0" +description = "Python client for Redis database and key-value store" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" + +[package.dependencies] +async-timeout = ">=4.0.2" +importlib-metadata = {version = ">=1.0", markers = "python_version < \"3.8\""} 
+typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -hiredis = ["hiredis (>=0.1.3)"] +hiredis = ["hiredis (>=1.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] [[package]] name = "six" @@ -251,20 +267,20 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] -name = "toml" -version = "0.10.2" -description = "Python Library for Tom's Obvious, Minimal Language" +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" category = "dev" optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = ">=3.7" [[package]] name = "typing-extensions" -version = "4.1.1" -description = "Backported and Experimental Type Hints for Python 3.6+" -category = "dev" +version = "4.4.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "wcwidth" @@ -276,56 +292,60 @@ python-versions = "*" [[package]] name = "zipp" -version = "3.6.0" +version = "3.11.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [metadata] lock-version = "1.1" -python-versions = "^3.6" 
-content-hash = "67508010829a152ee8dd27cefc16d1d32f1caad85250721a8d9d18fb37825e5f" +python-versions = "^3.7" +content-hash = "6ad42fae2cc7eab35a27500c86a45fffef1d406d3f324f173c56aee107d8a480" [metadata.files] -atomicwrites = [ - {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, - {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +async-timeout = [ + {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, + {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, ] attrs = [ - {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, - {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, + {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, + {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, ] click = [ {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, ] colorama = [ - {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, - {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = 
"sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] configobj = [ {file = "configobj-5.0.6.tar.gz", hash = "sha256:a2f5650770e1c87fb335af19a9b7eb73fc05ccf22144eb68db7d00cd2bcb0902"}, ] +exceptiongroup = [ + {file = "exceptiongroup-1.1.0-py3-none-any.whl", hash = "sha256:327cbda3da756e2de031a3107b81ab7b3770a602c4d16ca618298c526f4bec1e"}, + {file = "exceptiongroup-1.1.0.tar.gz", hash = "sha256:bcb67d800a4497e1b404c2dd44fca47d3b7a5e5433dbab67f96c1a685cdfdf23"}, +] importlib-metadata = [ - {file = "importlib_metadata-4.8.3-py3-none-any.whl", hash = "sha256:65a9576a5b2d58ca44d133c42a241905cc45e34d2c06fd5ba2bafa221e5d7b5e"}, - {file = "importlib_metadata-4.8.3.tar.gz", hash = "sha256:766abffff765960fcc18003801f7044eb6755ffae4521c8e8ce8e83b9c9b0668"}, + {file = "importlib_metadata-5.2.0-py3-none-any.whl", hash = "sha256:0eafa39ba42bf225fc00e67f701d71f85aead9f878569caf13c3724f704b970f"}, + {file = "importlib_metadata-5.2.0.tar.gz", hash = "sha256:404d48d62bba0b7a77ff9d405efd91501bef2e67ff4ace0bed40a0cf28c3c7cd"}, ] importlib-resources = [ - {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"}, - {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"}, + {file = "importlib_resources-5.10.2-py3-none-any.whl", hash = "sha256:7d543798b0beca10b6a01ac7cafda9f822c54db9e8376a6bf57e0cbd74d486b6"}, + {file = "importlib_resources-5.10.2.tar.gz", hash = "sha256:e4a96c8cc0339647ff9a5e0550d9f276fc5a01ffa276012b58ec108cfd7b8484"}, ] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] mistune = [ - {file = "mistune-2.0.3-py2.py3-none-any.whl", hash = 
"sha256:e3964140c0775535fba50bd616fe180920044a64bc21850253267b07bff89924"}, - {file = "mistune-2.0.3.tar.gz", hash = "sha256:d7605b46b6156b53b7d52a465202b29a6f00f4ea4130ad5d25e9d5547d6b7e50"}, + {file = "mistune-2.0.4-py2.py3-none-any.whl", hash = "sha256:182cc5ee6f8ed1b807de6b7bb50155df7b66495412836b9a74c8fbdfc75fe36d"}, + {file = "mistune-2.0.4.tar.gz", hash = "sha256:9ee0a66053e2267aba772c71e06891fa8f1af6d4b01d5e84e267b4570d4d9808"}, ] packaging = [ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, @@ -363,28 +383,24 @@ pluggy = [ {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.3-py3-none-any.whl", hash = "sha256:c93e53af97f630f12f5f62a3274e79527936ed466f038953dfa379d4941f651a"}, - {file = "prompt_toolkit-3.0.3.tar.gz", hash = "sha256:a402e9bf468b63314e37460b68ba68243d55b2f8c4d0192f85a019af3945050e"}, + {file = "prompt_toolkit-3.0.36-py3-none-any.whl", hash = "sha256:aa64ad242a462c5ff0363a7b9cfe696c20d55d9fc60c11fd8e632d064804d305"}, + {file = "prompt_toolkit-3.0.36.tar.gz", hash = "sha256:3e163f254bef5a03b146397d7c1963bd3e2812f0964bb9a24e6ec761fd28db63"}, ] ptyprocess = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] -py = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] -pygments = [ - {file = "Pygments-2.12.0-py3-none-any.whl", hash = "sha256:dc9c10fb40944260f6ed4c688ece0cd2048414940f1cea51b8b226318411c519"}, - {file = "Pygments-2.12.0.tar.gz", hash = 
"sha256:5eb116118f9612ff1ee89ac96437bb6b49e8f04d8a13b514ba26f620208e26eb"}, +Pygments = [ + {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, + {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"}, ] pyparsing = [ - {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"}, - {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, ] pytest = [ - {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, - {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, + {file = "pytest-7.2.0-py3-none-any.whl", hash = "sha256:892f933d339f068883b6fd5a459f03d85bfcb355e4981e146d2c7616c21fef71"}, + {file = "pytest-7.2.0.tar.gz", hash = "sha256:c4014eb40e10f11f355ad4e3c2fb2c6c6d1919c73f3b5a433de4708202cade59"}, ] python-dateutil = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, @@ -395,26 +411,26 @@ pytzdata = [ {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, ] redis = [ - {file = "redis-3.5.3-py2.py3-none-any.whl", hash = "sha256:432b788c4530cfe16d8d943a09d40ca6c16149727e4afe8c2c9d5580c59d9f24"}, - {file = "redis-3.5.3.tar.gz", hash = "sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2"}, + {file = "redis-4.4.0-py3-none-any.whl", hash = 
"sha256:cae3ee5d1f57d8caf534cd8764edf3163c77e073bdd74b6f54a87ffafdc5e7d9"}, + {file = "redis-4.4.0.tar.gz", hash = "sha256:7b8c87d19c45d3f1271b124858d2a5c13160c4e74d4835e28273400fa34d5228"}, ] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] -toml = [ - {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, - {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +tomli = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] typing-extensions = [ - {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"}, - {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"}, + {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, + {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, ] wcwidth = [ {file = "wcwidth-0.1.9-py2.py3-none-any.whl", hash = "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1"}, {file = "wcwidth-0.1.9.tar.gz", hash = "sha256:ee73862862a156bf77ff92b09034fc4825dd3af9cf81bc5b360668d425f3c5f1"}, ] zipp = [ - {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, - {file = "zipp-3.6.0.tar.gz", hash = 
"sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, + {file = "zipp-3.11.0-py3-none-any.whl", hash = "sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa"}, + {file = "zipp-3.11.0.tar.gz", hash = "sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766"}, ] diff --git a/pyproject.toml b/pyproject.toml index 87bb464..c9a676a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "iredis" -version = "1.12.1" +version = "1.13.0" description = "Terminal client for Redis with auto-completion and syntax highlighting." authors = ["laixintao <laixintao1995@163.com>"] readme = 'README.md' @@ -15,7 +15,6 @@ classifiers = [ "Environment :: Console :: Curses", "Environment :: MacOS X", "Operating System :: OS Independent", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", @@ -31,8 +30,7 @@ packages = [ ] [tool.poetry.dependencies] -python = "^3.6" -redis = "^3.4.0" +python = "^3.7" prompt_toolkit = "^3" Pygments = "^2" mistune = "^2.0" @@ -43,9 +41,10 @@ importlib-resources = "^5.1.0" # wcwidth 0.2.x uses pkg_resources which is not supported by PyOxidizer wcwidth = "0.1.9" packaging = "^21.3" +redis = "^4.3.4" [tool.poetry.dev-dependencies] -pytest = "^6" +pytest = "^7.2" pexpect = "^4.7" [tool.poetry.scripts] diff --git a/scripts/add_hash.txt b/scripts/add_hash.txt index fea3c7a..b42e716 100644 --- a/scripts/add_hash.txt +++ b/scripts/add_hash.txt @@ -1,3 +1,3 @@ -HMSET hash1 Tolerant intolerant Decent indecent Discreet indiscreet Excusable inexcusable +HMSET hash1 Tolerant intolerant Decent indecent Discrete indiscreet Excusable inexcusable HMSET hash2 Behave misbehave Interpret misinterpret Lead mislead Trust mistrust Likely unlikely Able unable Fortunate unfortunate Forgiving unforgiving HMSET hash3 Entity nonentity Conformist nonconformist Payment nonpayment Sense 
nonsense diff --git a/tests/unittests/command_parse/test_stream.py b/tests/unittests/command_parse/test_stream.py index 94408df..82f8670 100644 --- a/tests/unittests/command_parse/test_stream.py +++ b/tests/unittests/command_parse/test_stream.py @@ -333,7 +333,7 @@ def test_xclaim(judge_command): }, ) judge_command( - "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300", + "XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDLE 300", { "command": "XCLAIM", "key": "mystream", @@ -341,7 +341,7 @@ def test_xclaim(judge_command): "consumer": "Alice", "millisecond": ["3600000", "300"], "stream_id": "1526569498055-0", - "idel": "IDEL", + "idle": "IDLE", }, ) judge_command( diff --git a/tests/unittests/test_entry.py b/tests/unittests/test_entry.py index bfc7bec..912aabf 100644 --- a/tests/unittests/test_entry.py +++ b/tests/unittests/test_entry.py @@ -107,6 +107,7 @@ def test_command_shell_options_higher_priority(): db=3, username=None, password=None, + verify_ssl=None, ), ), ( @@ -119,6 +120,7 @@ def test_command_shell_options_higher_priority(): db=0, username=None, password=None, + verify_ssl=None, ), ), ( @@ -131,6 +133,20 @@ def test_command_shell_options_higher_priority(): db=0, username=None, password=None, + verify_ssl=None, + ), + ), + ( + "rediss://localhost:6379/1?ssl_cert_reqs=optional", + DSN( + scheme="rediss", + host="localhost", + port=6379, + path=None, + db=1, + username=None, + password=None, + verify_ssl="optional", ), ), ( @@ -143,6 +159,7 @@ def test_command_shell_options_higher_priority(): db=0, username="username", password="password", + verify_ssl=None, ), ), ( @@ -155,6 +172,7 @@ def test_command_shell_options_higher_priority(): db=0, username=None, password="password", + verify_ssl=None, ), ), ( @@ -167,6 +185,7 @@ def test_command_shell_options_higher_priority(): db=0, username="username", password=None, + verify_ssl=None, ), ), ( @@ -180,6 +199,7 @@ def test_command_shell_options_higher_priority(): db=0, username="username", 
password=None, + verify_ssl=None, ), ), ( @@ -192,6 +212,7 @@ def test_command_shell_options_higher_priority(): db=0, username="username", password="password2", + verify_ssl=None, ), ), ( @@ -204,6 +225,7 @@ def test_command_shell_options_higher_priority(): db=0, username=None, password="password3", + verify_ssl=None, ), ), ( @@ -216,6 +238,7 @@ def test_command_shell_options_higher_priority(): db=0, username=None, password=None, + verify_ssl=None, ), ), ], |