author     Daniel Baumann <daniel.baumann@progress-linux.org>   2021-07-17 07:34:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2021-07-17 07:34:57 +0000
commit     d71fd6264d58795c50b9350d7c39677b671e0896 (patch)
tree       0aa9e0bd923a00b9ddda4e01af55a76ed314097c
parent     Releasing debian version 1.9.1-3. (diff)
download   iredis-d71fd6264d58795c50b9350d7c39677b671e0896.tar.xz
           iredis-d71fd6264d58795c50b9350d7c39677b671e0896.zip
Merging upstream version 1.9.4.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  .bumpversion.cfg | 3
-rw-r--r--  .github/workflows/release.yaml | 10
-rw-r--r--  .github/workflows/test-binary-build.yaml | 77
-rw-r--r--  .github/workflows/test.yaml | 14
-rw-r--r--  .gitignore | 3
-rw-r--r--  CHANGELOG.md | 25
-rw-r--r--  README.md | 10
-rw-r--r--  iredis/__init__.py | 2
-rw-r--r--  iredis/bottom.py | 4
-rw-r--r--  iredis/client.py | 59
-rw-r--r--  iredis/commands.py | 3
-rw-r--r--  iredis/completers.py | 2
-rw-r--r--  iredis/data/command_syntax.csv | 6
-rw-r--r--  iredis/data/commands.json | 1423
-rw-r--r--  iredis/data/commands/acl-getuser.md | 6
-rw-r--r--  iredis/data/commands/acl-list.md | 4
-rw-r--r--  iredis/data/commands/acl-setuser.md | 17
-rw-r--r--  iredis/data/commands/auth.md | 4
-rw-r--r--  iredis/data/commands/bitfield.md | 13
-rw-r--r--  iredis/data/commands/blmove.md | 23
-rw-r--r--  iredis/data/commands/blpop.md | 6
-rw-r--r--  iredis/data/commands/brpop.md | 4
-rw-r--r--  iredis/data/commands/brpoplpush.md | 7
-rw-r--r--  iredis/data/commands/bzpopmax.md | 6
-rw-r--r--  iredis/data/commands/bzpopmin.md | 6
-rw-r--r--  iredis/data/commands/client-caching.md | 2
-rw-r--r--  iredis/data/commands/client-info.md | 16
-rw-r--r--  iredis/data/commands/client-kill.md | 36
-rw-r--r--  iredis/data/commands/client-list.md | 28
-rw-r--r--  iredis/data/commands/client-pause.md | 38
-rw-r--r--  iredis/data/commands/client-tracking.md | 4
-rw-r--r--  iredis/data/commands/client-trackinginfo.md | 25
-rw-r--r--  iredis/data/commands/client-unpause.md | 6
-rw-r--r--  iredis/data/commands/cluster-addslots.md | 2
-rw-r--r--  iredis/data/commands/cluster-delslots.md | 2
-rw-r--r--  iredis/data/commands/cluster-flushslots.md | 2
-rw-r--r--  iredis/data/commands/cluster-nodes.md | 9
-rw-r--r--  iredis/data/commands/cluster-setslot.md | 7
-rw-r--r--  iredis/data/commands/command.md | 6
-rw-r--r--  iredis/data/commands/config-set.md | 2
-rw-r--r--  iredis/data/commands/copy.md | 24
-rw-r--r--  iredis/data/commands/eval.md | 27
-rw-r--r--  iredis/data/commands/eval_ro.md | 19
-rw-r--r--  iredis/data/commands/evalsha_ro.md | 6
-rw-r--r--  iredis/data/commands/expiretime.md | 22
-rw-r--r--  iredis/data/commands/failover.md | 84
-rw-r--r--  iredis/data/commands/flushall.md | 25
-rw-r--r--  iredis/data/commands/flushdb.md | 15
-rw-r--r--  iredis/data/commands/geoadd.md | 64
-rw-r--r--  iredis/data/commands/georadius.md | 19
-rw-r--r--  iredis/data/commands/georadiusbymember.md | 3
-rw-r--r--  iredis/data/commands/geosearch.md | 77
-rw-r--r--  iredis/data/commands/geosearchstore.md | 11
-rw-r--r--  iredis/data/commands/getdel.md | 16
-rw-r--r--  iredis/data/commands/getex.md | 28
-rw-r--r--  iredis/data/commands/getset.md | 7
-rw-r--r--  iredis/data/commands/hello.md | 83
-rw-r--r--  iredis/data/commands/hmset.md | 2
-rw-r--r--  iredis/data/commands/hrandfield.md | 50
-rw-r--r--  iredis/data/commands/incr.md | 14
-rw-r--r--  iredis/data/commands/info.md | 82
-rw-r--r--  iredis/data/commands/latency-reset.md | 2
-rw-r--r--  iredis/data/commands/lmove.md | 77
-rw-r--r--  iredis/data/commands/lpop.md | 21
-rw-r--r--  iredis/data/commands/lpos.md | 12
-rw-r--r--  iredis/data/commands/memory-usage.md | 2
-rw-r--r--  iredis/data/commands/migrate.md | 9
-rw-r--r--  iredis/data/commands/module-load.md | 2
-rw-r--r--  iredis/data/commands/monitor.md | 7
-rw-r--r--  iredis/data/commands/pexpiretime.md | 19
-rw-r--r--  iredis/data/commands/publish.md | 8
-rw-r--r--  iredis/data/commands/pubsub.md | 6
-rw-r--r--  iredis/data/commands/reset.md | 23
-rw-r--r--  iredis/data/commands/rpop.md | 21
-rw-r--r--  iredis/data/commands/rpoplpush.md | 3
-rw-r--r--  iredis/data/commands/sadd.md | 2
-rw-r--r--  iredis/data/commands/scan.md | 2
-rw-r--r--  iredis/data/commands/script-debug.md | 4
-rw-r--r--  iredis/data/commands/script-flush.md | 15
-rw-r--r--  iredis/data/commands/set.md | 27
-rw-r--r--  iredis/data/commands/setbit.md | 5
-rw-r--r--  iredis/data/commands/setnx.md | 2
-rw-r--r--  iredis/data/commands/slowlog.md | 2
-rw-r--r--  iredis/data/commands/smismember.md | 17
-rw-r--r--  iredis/data/commands/spop.md | 30
-rw-r--r--  iredis/data/commands/srandmember.md | 51
-rw-r--r--  iredis/data/commands/stralgo.md | 12
-rw-r--r--  iredis/data/commands/subscribe.md | 6
-rw-r--r--  iredis/data/commands/xack.md | 16
-rw-r--r--  iredis/data/commands/xadd.md | 53
-rw-r--r--  iredis/data/commands/xautoclaim.md | 70
-rw-r--r--  iredis/data/commands/xclaim.md | 9
-rw-r--r--  iredis/data/commands/xgroup.md | 31
-rw-r--r--  iredis/data/commands/xinfo.md | 3
-rw-r--r--  iredis/data/commands/xpending.md | 46
-rw-r--r--  iredis/data/commands/xrange.md | 52
-rw-r--r--  iredis/data/commands/xreadgroup.md | 20
-rw-r--r--  iredis/data/commands/xrevrange.md | 52
-rw-r--r--  iredis/data/commands/xtrim.md | 69
-rw-r--r--  iredis/data/commands/zadd.md | 20
-rw-r--r--  iredis/data/commands/zdiff.md | 19
-rw-r--r--  iredis/data/commands/zdiffstore.md | 24
-rw-r--r--  iredis/data/commands/zinter.md | 21
-rw-r--r--  iredis/data/commands/zmscore.md | 18
-rw-r--r--  iredis/data/commands/zrandmember.md | 50
-rw-r--r--  iredis/data/commands/zrange.md | 144
-rw-r--r--  iredis/data/commands/zrangebylex.md | 3
-rw-r--r--  iredis/data/commands/zrangebyscore.md | 3
-rw-r--r--  iredis/data/commands/zrangestore.md | 14
-rw-r--r--  iredis/data/commands/zremrangebylex.md | 2
-rw-r--r--  iredis/data/commands/zrevrange.md | 3
-rw-r--r--  iredis/data/commands/zrevrangebylex.md | 3
-rw-r--r--  iredis/data/commands/zrevrangebyscore.md | 3
-rw-r--r--  iredis/data/commands/zunion.md | 21
-rw-r--r--  iredis/entry.py | 19
-rw-r--r--  iredis/redis_grammar.py | 2
-rw-r--r--  iredis/renders.py | 18
-rw-r--r--  iredis/utils.py | 13
-rw-r--r--  poetry.lock | 306
-rw-r--r--  pyoxidizer.template.bzl | 329
-rw-r--r--  pyproject.toml | 5
-rw-r--r--  tests/cli_tests/__init__.py | 0
-rw-r--r--  tests/cli_tests/test_cli_start.py | 8
-rw-r--r--  tests/cli_tests/test_command_input.py | 21
-rw-r--r--  tests/cli_tests/test_shell_pipeline.py | 10
-rw-r--r--  tests/helpers.py | 11
-rw-r--r--  tests/unittests/__init__.py | 0
-rw-r--r--  tests/unittests/command_parse/test_cluster.py | 36
-rw-r--r--  tests/unittests/test_client.py | 102
-rw-r--r--  tests/unittests/test_render_functions.py | 6
-rw-r--r--  tests/unittests/test_utils.py | 6
131 files changed, 3852 insertions, 796 deletions
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 1edc391..5a78ab2 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,9 +1,8 @@
[bumpversion]
-current_version = 1.9.1
+current_version = 1.9.4
commit = True
tag = True
[bumpversion:file:iredis/__init__.py]
[bumpversion:file:pyproject.toml]
-
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index d0cc7df..27b05c7 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -89,7 +89,7 @@ jobs:
run: |
python3 -m venv venv
. venv/bin/activate
- pip install -U pip
+ pip install pip==21.1
pip install poetry
poetry install
python -c "import sys; print(sys.version)"
@@ -107,18 +107,12 @@ jobs:
iredis -h
iredis help GET
- - name: Cache cargo registry
- uses: actions/cache@v1
- with:
- path: ~/.cargo/registry
- key: ${{ runner.os }}-cargo-registry
-
- name: Executable Build
run: |
# pyoxidizer doesn't know the wheel path, and it doesn't support passing env vars
export WHEEL_PATH=`ls ./dist/iredis*.whl`
envsubst '$WHEEL_PATH' < pyoxidizer.template.bzl > pyoxidizer.bzl
- cargo install pyoxidizer --vers 0.6.0
+ pip install pyoxidizer
pyoxidizer build --release install
cd ./build/x86*/release/install
tar -zcf ../../../iredis.tar.gz lib/ iredis
diff --git a/.github/workflows/test-binary-build.yaml b/.github/workflows/test-binary-build.yaml
new file mode 100644
index 0000000..a71d576
--- /dev/null
+++ b/.github/workflows/test-binary-build.yaml
@@ -0,0 +1,77 @@
+name: Test binary build.
+
+on:
+ pull_request:
+ push:
+ branches:
+ - master
+
+jobs:
+ test-release-binary:
+ name: Test Build Executable Binary. You can download from Artifact after building.
+ runs-on: ubuntu-16.04
+
+ # FIXME
+ # help test shouldn't depends on this to run
+ services:
+ redis:
+ image: redis
+ ports:
+ - 6379:6379
+ options: --entrypoint redis-server
+
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v1
+ with:
+ python-version: 3.7
+ architecture: 'x64'
+ - name: Cache venv
+ uses: actions/cache@v1
+ with:
+ path: venv
+ # Look to see if there is a cache hit for the corresponding requirements file
+ key: ubuntu-16.04-poetryenv-${{ hashFiles('poetry.lock') }}
+ - name: Install Dependencies
+ run: |
+ python3 -m venv venv
+ . venv/bin/activate
+ pip install pip==21.1
+ pip install poetry
+ poetry install
+ python -c "import sys; print(sys.version)"
+ pip list
+ - name: Poetry Build
+ run: |
+ . venv/bin/activate
+ poetry build
+ - name: Test Build
+ run: |
+ python3 -m venv fresh_env
+ . fresh_env/bin/activate
+ pip install dist/*.whl
+
+ iredis -h
+ iredis help GET
+
+ - name: Executable Build
+ run: |
+ # pyoxidizer doesn't know the wheel path, and it doesn't support passing env vars
+ export WHEEL_PATH=`ls ./dist/iredis*.whl`
+ envsubst '$WHEEL_PATH' < pyoxidizer.template.bzl > pyoxidizer.bzl
+ pip install pyoxidizer
+ pyoxidizer build --release install
+ cd ./build/x86*/release/install
+ tar -zcf ../../../iredis.tar.gz lib/ iredis
+ cd -
+
+ - name: Test Executable
+ run: |
+ ./build/x86*/release/install/iredis -h
+ ./build/x86*/release/install/iredis help GET
+
+ - name: Upload Release Asset to Github Artifact
+ uses: actions/upload-artifact@v2
+ with:
+ name: iredis-${{github.sha}}.tar.gz
+ path: ./build/iredis.tar.gz
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 1c0a360..ede6dd3 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -12,7 +12,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-16.04]
- python: ['3.6', '3.7', '3.8']
+ python: ['3.6', '3.7', '3.8', '3.9']
redis: [5, 6]
runs-on: ${{ matrix.os }}
@@ -25,21 +25,21 @@ jobs:
steps:
- uses: actions/checkout@v2
- - uses: actions/setup-python@v1
+ - uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
architecture: 'x64'
- name: Cache venv
- uses: actions/cache@v1
+ uses: actions/cache@v2
with:
path: venv
# Look to see if there is a cache hit for the corresponding requirements file
- key: ${{ matrix.os }}-poetryenv-${{ hashFiles('poetry.lock') }}
+ key: poetryenv-${{ matrix.os }}-${{ matrix.python }}-${{ hashFiles('poetry.lock') }}
- name: Install Dependencies
run: |
python3 -m venv venv
. venv/bin/activate
- pip install -U pip
+ pip install -U pip==21.1 setuptools
pip install poetry
poetry install
python -c "import sys; print(sys.version)"
@@ -49,7 +49,7 @@ jobs:
REDIS_VERSION: ${{ matrix.redis }}
run: |
. venv/bin/activate
- pytest
+ pytest || cat cli_test.log
lint:
name: flake8 & black
runs-on: ubuntu-16.04
@@ -61,7 +61,7 @@ jobs:
python-version: 3.7
architecture: 'x64'
- name: Cache venv
- uses: actions/cache@v1
+ uses: actions/cache@v2
with:
path: venv
# Look to see if there is a cache hit for the corresponding requirements file
diff --git a/.gitignore b/.gitignore
index 86b0f1a..eb6314a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -105,4 +105,5 @@ venv.bak/
*.aof
# IDE
-.vscode
\ No newline at end of file
+.vscode
+.idea/
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b565097..14aa9aa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,26 @@
+## 1.10
+
+- Feature: more human readable output for `HELP` command like `ACL HELP` and
+ `MEMORY HELP`.
+- Feature: you can use <kbd>Ctrl</kbd> + <kbd>C</kbd> to cancel a blocking
+ command like `BLPOP`.
+
+### 1.9.4
+
+- Bugfix: respect newbie_mode set in config, if cli flag is missing. thanks to [sid-maddy]
+
+### 1.9.3
+
+- Bugfix: When IRedis start with `--decode=utf-8`, command with shell pipe will
+ fail. ( [#383](https://github.com/laixintao/iredis/issues/383)). Thanks to
+ [hanaasagi].
+
+### 1.9.2
+
+- Bugfix: before `cluster` commands' `node-id` only accept numbers, not it's
+ fixed. `node-id` can be `\w+`.
+- Feature: support set client name for iredis connections via `--client-name`.
+
### 1.9.1
- Feature: support auto-reissue command to another Redis server, when got a
@@ -216,3 +239,5 @@
[lyqscmy]: https://github.com/lyqscmy
[brianmaissy]: https://github.com/brianmaissy
[otms61]: https://github.com/otms61
+[hanaasagi]: https://github.com/Hanaasagi
+[sid-maddy]: https://github.com/sid-maddy
diff --git a/README.md b/README.md
index fff2cbe..c6f60e8 100644
--- a/README.md
+++ b/README.md
@@ -2,12 +2,12 @@
<img width="100" height="100" src="https://raw.githubusercontent.com/laixintao/iredis/master/docs/assets/logo.png" />
</p>
-<h3 align="center">Interactive Redis: A Cli for Redis with AutoCompletion and Syntax Highlighting.</h4>
+<h3 align="center">Interactive Redis: A Cli for Redis with AutoCompletion and Syntax Highlighting.</h3>
<p align="center">
<a href="https://github.com/laixintao/iredis/actions"><img src="https://github.com/laixintao/iredis/workflows/Test/badge.svg" alt="Github Action"></a>
<a href="https://badge.fury.io/py/iredis"><img src="https://badge.fury.io/py/iredis.svg" alt="PyPI version"></a>
-<img src="https://badgen.net/badge/python/3.6%20|%203.7%20|%203.8/" alt="Python version">
+<img src="https://badgen.net/badge/python/3.6%20%7C%203.7%20%7C%203.8%20%7C%203.9/" alt="Python version">
<a href="https://pepy.tech/project/iredis"><img src="https://pepy.tech/badge/iredis" alt="Download stats"></a>
<a href="https://t.me/iredis_users"><img src="https://badgen.net/badge/icon/join?icon=telegram&amp;label=usergroup" alt="Chat on telegram"></a>
<a href="https://console.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/laixintao/iredis&amp;cloudshell_print=docs/cloudshell/run-in-docker.txt"><img src="https://badgen.net/badge/run/GoogleCloudShell/blue?icon=terminal" alt="Open in Cloud Shell"></a>
@@ -145,7 +145,7 @@ like <kbd>Ctrl</kbd> + <kbd>F</kbd> to forward work.
Also:
-- <kbd>Ctrl</kbd> + <kbd>F</kbd> (i.e. EOF) to exit; you can also use the `exit`
+- <kbd>Ctrl</kbd> + <kbd>D</kbd> (i.e. EOF) to exit; you can also use the `exit`
command.
- <kbd>Ctrl</kbd> + <kbd>L</kbd> to clear screen; you can also use the `clear`
command.
@@ -156,8 +156,8 @@ Also:
### Release Strategy
-IRedis is built and released by CircleCI. Whenever a tag is pushed to the
-`master` branch, a new release is built and uploaded to pypi.org, it's very
+IRedis is built and released by `GitHub Actions`. Whenever a tag is pushed to
+the `master` branch, a new release is built and uploaded to pypi.org, it's very
convenient.
Thus, we release as often as possible, so that users can always enjoy the new
diff --git a/iredis/__init__.py b/iredis/__init__.py
index 38cf6db..8445203 100644
--- a/iredis/__init__.py
+++ b/iredis/__init__.py
@@ -1 +1 @@
-__version__ = "1.9.1"
+__version__ = "1.9.4"
diff --git a/iredis/bottom.py b/iredis/bottom.py
index 28746f8..17c3af3 100644
--- a/iredis/bottom.py
+++ b/iredis/bottom.py
@@ -24,12 +24,12 @@ class BottomToolbar:
def render(self):
text = BUTTOM_TEXT
- # add command help if valide
+ # add command help if valid
if self.command_holder.command:
try:
command_info = commands_summary[self.command_holder.command]
text = command_syntax(self.command_holder.command, command_info)
except KeyError as e:
logger.exception(e)
- pass
+
return text
diff --git a/iredis/client.py b/iredis/client.py
index 61613dc..2c9bda5 100644
--- a/iredis/client.py
+++ b/iredis/client.py
@@ -62,6 +62,7 @@ class Client:
path=None,
scheme="redis",
username=None,
+ client_name=None,
):
self.host = host
self.port = port
@@ -69,17 +70,11 @@ class Client:
self.path = path
# FIXME username is not using...
self.username = username
+ self.client_name = client_name
self.scheme = scheme
+ self.password = password
- self.connection = self.create_connection(
- host,
- port,
- db,
- password,
- path,
- scheme,
- username,
- )
+ self.build_connection()
# all command upper case
self.answer_callbacks = command2callback
@@ -101,6 +96,21 @@ class Client:
if config.version and re.match(r"([\d\.]+)", config.version):
self.auth_compat(config.version)
+ def build_connection(self):
+ """
+ create a new connection and replace ``self.connection``
+ """
+ self.connection = self.create_connection(
+ self.host,
+ self.port,
+ self.db,
+ self.password,
+ self.path,
+ self.scheme,
+ self.username,
+ client_name=self.client_name,
+ )
+
def create_connection(
self,
host=None,
@@ -110,6 +120,7 @@ class Client:
path=None,
scheme="redis",
username=None,
+ client_name=None,
):
if scheme in ("redis", "rediss"):
connection_kwargs = {
@@ -118,13 +129,19 @@ class Client:
"db": db,
"password": password,
"socket_keepalive": config.socket_keepalive,
+ "client_name": client_name,
}
if scheme == "rediss":
connection_class = SSLConnection
else:
connection_class = Connection
else:
- connection_kwargs = {"db": db, "password": password, "path": path}
+ connection_kwargs = {
+ "db": db,
+ "password": password,
+ "path": path,
+ "client_name": client_name,
+ }
connection_class = UnixDomainSocketConnection
if config.decode:
@@ -242,6 +259,15 @@ class Client:
except redis.exceptions.ExecAbortError:
config.transaction = False
raise
+ except KeyboardInterrupt:
+ logger.warning("received KeyboardInterrupt... rebuild connection...")
+ connection.disconnect()
+ connection.connect()
+ print(
+ "KeyboardInterrupt received! User canceled reading response!",
+ file=sys.stderr,
+ )
+ return None
else:
return response
raise last_error
@@ -338,7 +364,7 @@ class Client:
grammar = completer.get_completer(input_text=rawinput).compiled_grammar
matched = grammar.match(rawinput)
if not matched:
- # invalide command!
+ # invalid command!
return rawinput, None
variables = matched.variables()
shell_command = variables.get("shellcommand")
@@ -397,12 +423,7 @@ class Client:
# subcommand's stdout/stderr
if shell_command and config.shell:
# pass the raw response of redis to shell command
- if isinstance(redis_resp, list):
- # FIXME not handling nested list, use renders.render_raw
- # instead
- stdin = b"\n".join(redis_resp)
- else:
- stdin = redis_resp
+ stdin = OutputRender.render_raw(redis_resp)
run(shell_command, input=stdin, shell=True)
return
@@ -486,7 +507,7 @@ class Client:
redis_grammar = completer.get_completer(command).compiled_grammar
m = redis_grammar.match(command)
if not m:
- # invalide command!
+ # invalid command!
return
variables = m.variables()
# zset withscores
@@ -501,7 +522,7 @@ class Client:
doc = read_text(commands_data, f"{command_docs_name}.md")
except FileNotFoundError:
raise NotRedisCommand(
- f"{command_summary_name} is not a valide Redis command."
+ f"{command_summary_name} is not a valid Redis command."
)
rendered_detail = markdown.render(doc)
summary_dict = commands_summary[command_summary_name]
diff --git a/iredis/commands.py b/iredis/commands.py
index 358c8b7..dcd457f 100644
--- a/iredis/commands.py
+++ b/iredis/commands.py
@@ -94,7 +94,6 @@ commands_summary.update(
"PEEK": {
"summary": "Get the key's type and value.",
"arguments": [{"name": "key", "type": "key"}],
- "since": "1.0",
"complexity": "O(1).",
"since": "1.0",
"group": "iredis",
@@ -135,7 +134,7 @@ def split_command_args(command):
input_args = command[matcher.end() :]
break
else:
- raise InvalidArguments(f"`{command}` is not a valide Redis Command")
+ raise InvalidArguments(f"`{command}` is not a valid Redis Command")
args = list(strip_quote_args(input_args))
diff --git a/iredis/completers.py b/iredis/completers.py
index 6756637..afdc7ef 100644
--- a/iredis/completers.py
+++ b/iredis/completers.py
@@ -191,7 +191,7 @@ class IRedisCompleter(Completer):
grammar = completer.compiled_grammar
m = grammar.match(command)
if not m:
- # invalide command!
+ # invalid command!
return
variables = m.variables()
diff --git a/iredis/data/command_syntax.csv b/iredis/data/command_syntax.csv
index e29c2c0..4036ff0 100644
--- a/iredis/data/command_syntax.csv
+++ b/iredis/data/command_syntax.csv
@@ -123,7 +123,7 @@ server,ACL CAT,command_categorynamex,render_list
server,ACL DELUSER,command_usernames,render_int
server,ACL GENPASS,command_countx,render_bulk_string
server,ACL GETUSER,command_username,render_list
-server,ACL HELP,command,render_list
+server,ACL HELP,command,render_help
server,ACL LIST,command,render_list
server,ACL LOAD,command,render_simple_string
server,ACL LOG,command_count_or_resetx,render_list_or_string
@@ -152,12 +152,12 @@ server,LOLWUT,command_version,render_bytes
server,LASTSAVE,command,render_unixtime
server,LATENCY DOCTOR,command,render_bulk_string_decode
server,LATENCY GRAPH,command_graphevent,render_bulk_string_decode
-server,LATENCY HELP,command,render_list
+server,LATENCY HELP,command,render_help
server,LATENCY HISTORY,command_graphevent,render_list
server,LATENCY LATEST,command,render_list
server,LATENCY RESET,command_graphevents,render_int
server,MEMORY DOCTOR,command,render_bulk_string_decode
-server,MEMORY HELP,command,render_list
+server,MEMORY HELP,command,render_help
server,MEMORY MALLOC-STATS,command,render_bulk_string_decode
server,MEMORY PURGE,command,render_simple_string
server,MEMORY STATS,command,render_nested_pair
diff --git a/iredis/data/commands.json b/iredis/data/commands.json
index ae05b80..1b0ae22 100644
--- a/iredis/data/commands.json
+++ b/iredis/data/commands.json
@@ -23,14 +23,31 @@
"since": "6.0.0",
"group": "server"
},
+ "ACL GETUSER": {
+ "summary": "Get the rules for a specific ACL user",
+ "complexity": "O(N). Where N is the number of password, command and pattern rules that the user has.",
+ "arguments": [
+ {
+ "name": "username",
+ "type": "string"
+ }
+ ],
+ "since": "6.0.0",
+ "group": "server"
+ },
"ACL SETUSER": {
"summary": "Modify or create the rules for a specific ACL user",
"complexity": "O(N). Where N is the number of rules provided.",
"arguments": [
{
+ "name": "username",
+ "type": "string"
+ },
+ {
"name": "rule",
"type": "string",
- "multiple": true
+ "multiple": true,
+ "optional": true
}
],
"since": "6.0.0",
@@ -94,6 +111,12 @@
"since": "6.0.0",
"group": "server"
},
+ "ACL HELP": {
+ "summary": "Show helpful text about the different subcommands",
+ "complexity": "O(1)",
+ "since": "6.0.0",
+ "group": "server"
+ },
"APPEND": {
"summary": "Append a value to a key",
"complexity": "O(1). The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation.",
@@ -114,6 +137,11 @@
"summary": "Authenticate to the server",
"arguments": [
{
+ "name": "username",
+ "type": "string",
+ "optional": true
+ },
+ {
"name": "password",
"type": "string"
}
@@ -162,7 +190,7 @@
}
],
"since": "2.6.0",
- "group": "string"
+ "group": "bitmap"
},
"BITFIELD": {
"summary": "Perform arbitrary bitfield integer operations on strings",
@@ -224,7 +252,7 @@
}
],
"since": "3.2.0",
- "group": "string"
+ "group": "bitmap"
},
"BITOP": {
"summary": "Perform bitwise operations between strings",
@@ -245,7 +273,7 @@
}
],
"since": "2.6.0",
- "group": "string"
+ "group": "bitmap"
},
"BITPOS": {
"summary": "Find first bit set or clear in a string",
@@ -260,22 +288,28 @@
"type": "integer"
},
{
- "name": "start",
- "type": "integer",
- "optional": true
- },
- {
- "name": "end",
- "type": "integer",
- "optional": true
+ "name": "index",
+ "type": "block",
+ "optional": true,
+ "block": [
+ {
+ "name": "start",
+ "type": "integer"
+ },
+ {
+ "name": "end",
+ "type": "integer",
+ "optional": true
+ }
+ ]
}
],
"since": "2.8.7",
- "group": "string"
+ "group": "bitmap"
},
"BLPOP": {
"summary": "Remove and get the first element in a list, or block until one is available",
- "complexity": "O(1)",
+ "complexity": "O(N) where N is the number of provided keys.",
"arguments": [
{
"name": "key",
@@ -284,7 +318,7 @@
},
{
"name": "timeout",
- "type": "integer"
+ "type": "double"
}
],
"since": "2.0.0",
@@ -292,7 +326,7 @@
},
"BRPOP": {
"summary": "Remove and get the last element in a list, or block until one is available",
- "complexity": "O(1)",
+ "complexity": "O(N) where N is the number of provided keys.",
"arguments": [
{
"name": "key",
@@ -301,7 +335,7 @@
},
{
"name": "timeout",
- "type": "integer"
+ "type": "double"
}
],
"since": "2.0.0",
@@ -321,12 +355,48 @@
},
{
"name": "timeout",
- "type": "integer"
+ "type": "double"
}
],
"since": "2.2.0",
"group": "list"
},
+ "BLMOVE": {
+ "summary": "Pop an element from a list, push it to another list and return it; or block until one is available",
+ "complexity": "O(1)",
+ "arguments": [
+ {
+ "name": "source",
+ "type": "key"
+ },
+ {
+ "name": "destination",
+ "type": "key"
+ },
+ {
+ "name": "wherefrom",
+ "type": "enum",
+ "enum": [
+ "LEFT",
+ "RIGHT"
+ ]
+ },
+ {
+ "name": "whereto",
+ "type": "enum",
+ "enum": [
+ "LEFT",
+ "RIGHT"
+ ]
+ },
+ {
+ "name": "timeout",
+ "type": "double"
+ }
+ ],
+ "since": "6.2.0",
+ "group": "list"
+ },
"BZPOPMIN": {
"summary": "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available",
"complexity": "O(log(N)) with N being the number of elements in the sorted set.",
@@ -338,7 +408,7 @@
},
{
"name": "timeout",
- "type": "integer"
+ "type": "double"
}
],
"since": "5.0.0",
@@ -355,7 +425,7 @@
},
{
"name": "timeout",
- "type": "integer"
+ "type": "double"
}
],
"since": "5.0.0",
@@ -383,6 +453,12 @@
"since": "5.0.0",
"group": "connection"
},
+ "CLIENT INFO": {
+ "summary": "Returns information about the current client connection.",
+ "complexity": "O(1)",
+ "since": "6.2.0",
+ "group": "connection"
+ },
"CLIENT KILL": {
"summary": "Kill the connection of a client",
"complexity": "O(N) where N is the number of client connections",
@@ -410,12 +486,24 @@
"optional": true
},
{
+ "command": "USER",
+ "name": "username",
+ "type": "string",
+ "optional": true
+ },
+ {
"command": "ADDR",
"name": "ip:port",
"type": "string",
"optional": true
},
{
+ "command": "LADDR",
+ "name": "ip:port",
+ "type": "string",
+ "optional": true
+ },
+ {
"command": "SKIPME",
"name": "yes/no",
"type": "string",
@@ -439,6 +527,21 @@
"pubsub"
],
"optional": true
+ },
+ {
+ "name": "id",
+ "type": "block",
+ "block": [
+ {
+ "command": "ID"
+ },
+ {
+ "name": "client-id",
+ "type": "integer",
+ "multiple": true
+ }
+ ],
+ "optional": true
}
],
"since": "2.4.0",
@@ -456,6 +559,12 @@
"since": "6.0.0",
"group": "connection"
},
+ "CLIENT UNPAUSE": {
+ "summary": "Resume processing of clients that were paused",
+ "complexity": "O(N) Where N is the number of paused clients",
+ "since": "6.2.0",
+ "group": "connection"
+ },
"CLIENT PAUSE": {
"summary": "Stop processing commands from clients for some time",
"complexity": "O(1)",
@@ -463,6 +572,15 @@
{
"name": "timeout",
"type": "integer"
+ },
+ {
+ "name": "mode",
+ "type": "enum",
+ "optional": true,
+ "enum": [
+ "WRITE",
+ "ALL"
+ ]
}
],
"since": "2.9.50",
@@ -482,7 +600,7 @@
]
}
],
- "since": "3.2",
+ "since": "3.2.0",
"group": "connection"
},
"CLIENT SETNAME": {
@@ -499,7 +617,7 @@
},
"CLIENT TRACKING": {
"summary": "Enable or disable server assisted client side caching support",
- "complexity": "O(1)",
+ "complexity": "O(1). Some options may introduce additional complexity.",
"arguments": [
{
"name": "status",
@@ -518,8 +636,9 @@
{
"command": "PREFIX",
"name": "prefix",
- "type": "srting",
- "optional": true
+ "type": "string",
+ "optional": true,
+ "multiple": true
},
{
"name": "BCAST",
@@ -557,6 +676,12 @@
"since": "6.0.0",
"group": "connection"
},
+ "CLIENT TRACKINGINFO": {
+ "summary": "Return information about server assisted client side caching for the current connection",
+ "complexity": "O(1)",
+ "since": "6.2.0",
+ "group": "connection"
+ },
"CLIENT UNBLOCK": {
"summary": "Unblock a client blocked in a blocking command from a different connection",
"complexity": "O(log N) where N is the number of client connections",
@@ -903,6 +1028,36 @@
"since": "2.0.0",
"group": "server"
},
+ "COPY": {
+ "summary": "Copy a key",
+ "complexity": "O(N) worst case for collections, where N is the number of nested items. O(1) for string values.",
+ "since": "6.2.0",
+ "arguments": [
+ {
+ "name": "source",
+ "type": "key"
+ },
+ {
+ "name": "destination",
+ "type": "key"
+ },
+ {
+ "command": "DB",
+ "name": "destination-db",
+ "type": "integer",
+ "optional": true
+ },
+ {
+ "name": "replace",
+ "type": "enum",
+ "enum": [
+ "REPLACE"
+ ],
+ "optional": true
+ }
+ ],
+ "group": "generic"
+ },
"DBSIZE": {
"summary": "Return the number of keys in the selected database",
"since": "1.0.0",
@@ -972,7 +1127,7 @@
},
"DUMP": {
"summary": "Return a serialized version of the value stored at the specified key.",
- "complexity": "O(1) to access the key and additional O(N*M) to serialized it, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1).",
+ "complexity": "O(1) to access the key and additional O(N*M) to serialize it, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1).",
"arguments": [
{
"name": "key",
@@ -1008,17 +1163,45 @@
{
"name": "key",
"type": "key",
+ "optional": true,
"multiple": true
},
{
"name": "arg",
"type": "string",
+ "optional": true,
"multiple": true
}
],
"since": "2.6.0",
"group": "scripting"
},
+ "EVAL_RO": {
+ "summary": "Execute a read-only Lua script server side",
+ "complexity": "Depends on the script that is executed.",
+ "arguments": [
+ {
+ "name": "script",
+ "type": "string"
+ },
+ {
+ "name": "numkeys",
+ "type": "integer"
+ },
+ {
+ "name": "key",
+ "type": "key",
+ "multiple": true
+ },
+ {
+ "name": "arg",
+ "type": "string",
+ "multiple": true
+ }
+ ],
+ "since": "7.0.0",
+ "group": "scripting"
+ },
"EVALSHA": {
"summary": "Execute a Lua script server side",
"complexity": "Depends on the script that is executed.",
@@ -1034,17 +1217,45 @@
{
"name": "key",
"type": "key",
+ "optional": true,
"multiple": true
},
{
"name": "arg",
"type": "string",
+ "optional": true,
"multiple": true
}
],
"since": "2.6.0",
"group": "scripting"
},
+ "EVALSHA_RO": {
+ "summary": "Execute a read-only Lua script server side",
+ "complexity": "Depends on the script that is executed.",
+ "arguments": [
+ {
+ "name": "sha1",
+ "type": "string"
+ },
+ {
+ "name": "numkeys",
+ "type": "integer"
+ },
+ {
+ "name": "key",
+ "type": "key",
+ "multiple": true
+ },
+ {
+ "name": "arg",
+ "type": "string",
+ "multiple": true
+ }
+ ],
+ "since": "7.0.0",
+ "group": "scripting"
+ },
"EXEC": {
"summary": "Execute all commands issued after MULTI",
"since": "1.2.0",
@@ -1052,7 +1263,7 @@
},
"EXISTS": {
"summary": "Determine if a key exists",
- "complexity": "O(1)",
+ "complexity": "O(N) where N is the number of keys to check.",
"arguments": [
{
"name": "key",
@@ -1095,14 +1306,67 @@
"since": "1.2.0",
"group": "generic"
},
+ "EXPIRETIME": {
+ "summary": "Get the expiration Unix timestamp for a key",
+ "complexity": "O(1)",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ }
+ ],
+ "since": "7.0.0",
+ "group": "generic"
+ },
+ "FAILOVER": {
+ "summary": "Start a coordinated failover between this server and one of its replicas.",
+ "arguments": [
+ {
+ "name": "target",
+ "type": "block",
+ "optional": true,
+ "block": [
+ {
+ "command": "TO"
+ },
+ {
+ "name": "host",
+ "type": "string"
+ },
+ {
+ "name": "port",
+ "type": "integer"
+ },
+ {
+ "command": "FORCE",
+ "optional": true
+ }
+ ]
+ },
+ {
+ "command": "ABORT",
+ "optional": true
+ },
+ {
+ "command": "TIMEOUT",
+ "name": "milliseconds",
+ "type": "integer",
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "server"
+ },
"FLUSHALL": {
"summary": "Remove all keys from all databases",
+ "complexity": "O(N) where N is the total number of keys in all databases",
"arguments": [
{
"name": "async",
"type": "enum",
"enum": [
- "ASYNC"
+ "ASYNC",
+ "SYNC"
],
"optional": true
}
@@ -1112,12 +1376,14 @@
},
"FLUSHDB": {
"summary": "Remove all keys from the current database",
+ "complexity": "O(N) where N is the number of keys in the selected database",
"arguments": [
{
"name": "async",
"type": "enum",
"enum": [
- "ASYNC"
+ "ASYNC",
+ "SYNC"
],
"optional": true
}
@@ -1134,6 +1400,23 @@
"type": "key"
},
{
+ "name": "condition",
+ "type": "enum",
+ "enum": [
+ "NX",
+ "XX"
+ ],
+ "optional": true
+ },
+ {
+ "name": "change",
+ "type": "enum",
+ "enum": [
+ "CH"
+ ],
+ "optional": true
+ },
+ {
"name": [
"longitude",
"latitude",
@@ -1169,7 +1452,7 @@
},
"GEOPOS": {
"summary": "Returns longitude and latitude of members of a geospatial index",
- "complexity": "O(log(N)) for each member requested, where N is the number of elements in the sorted set.",
+ "complexity": "O(N) where N is the number of members requested.",
"arguments": [
{
"name": "key",
@@ -1270,9 +1553,23 @@
"optional": true
},
{
- "command": "COUNT",
+ "type": "block",
"name": "count",
- "type": "integer",
+ "block": [
+ {
+ "name": "count",
+ "command": "COUNT",
+ "type": "integer"
+ },
+ {
+ "name": "any",
+ "type": "enum",
+ "enum": [
+ "ANY"
+ ],
+ "optional": true
+ }
+ ],
"optional": true
},
{
@@ -1351,9 +1648,23 @@
"optional": true
},
{
- "command": "COUNT",
+ "type": "block",
"name": "count",
- "type": "integer",
+ "block": [
+ {
+ "name": "count",
+ "command": "COUNT",
+ "type": "integer"
+ },
+ {
+ "name": "any",
+ "type": "enum",
+ "enum": [
+ "ANY"
+ ],
+ "optional": true
+ }
+ ],
"optional": true
},
{
@@ -1381,6 +1692,256 @@
"since": "3.2.0",
"group": "geo"
},
+ "GEOSEARCH": {
+ "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle.",
+ "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "command": "FROMMEMBER",
+ "name": "member",
+ "type": "string",
+ "optional": true
+ },
+ {
+ "command": "FROMLONLAT",
+ "name": [
+ "longitude",
+ "latitude"
+ ],
+ "type": [
+ "double",
+ "double"
+ ],
+ "optional": true
+ },
+ {
+ "type": "block",
+ "name": "circle",
+ "block": [
+ {
+ "name": "radius",
+ "command": "BYRADIUS",
+ "type": "double"
+ },
+ {
+ "name": "unit",
+ "type": "enum",
+ "enum": [
+ "m",
+ "km",
+ "ft",
+ "mi"
+ ]
+ }
+ ],
+ "optional": true
+ },
+ {
+ "type": "block",
+ "name": "box",
+ "block": [
+ {
+ "name": "width",
+ "command": "BYBOX",
+ "type": "double"
+ },
+ {
+ "name": "height",
+ "type": "double"
+ },
+ {
+ "name": "unit",
+ "type": "enum",
+ "enum": [
+ "m",
+ "km",
+ "ft",
+ "mi"
+ ]
+ }
+ ],
+ "optional": true
+ },
+ {
+ "name": "order",
+ "type": "enum",
+ "enum": [
+ "ASC",
+ "DESC"
+ ],
+ "optional": true
+ },
+ {
+ "type": "block",
+ "name": "count",
+ "block": [
+ {
+ "name": "count",
+ "command": "COUNT",
+ "type": "integer"
+ },
+ {
+ "name": "any",
+ "type": "enum",
+ "enum": [
+ "ANY"
+ ],
+ "optional": true
+ }
+ ],
+ "optional": true
+ },
+ {
+ "name": "withcoord",
+ "type": "enum",
+ "enum": [
+ "WITHCOORD"
+ ],
+ "optional": true
+ },
+ {
+ "name": "withdist",
+ "type": "enum",
+ "enum": [
+ "WITHDIST"
+ ],
+ "optional": true
+ },
+ {
+ "name": "withhash",
+ "type": "enum",
+ "enum": [
+ "WITHHASH"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2",
+ "group": "geo"
+ },
+ "GEOSEARCHSTORE": {
+ "summary": "Query a sorted set representing a geospatial index to fetch members inside an area of a box or a circle, and store the result in another key.",
+ "complexity": "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape",
+ "arguments": [
+ {
+ "name": "destination",
+ "type": "key"
+ },
+ {
+ "name": "source",
+ "type": "key"
+ },
+ {
+ "command": "FROMMEMBER",
+ "name": "member",
+ "type": "string",
+ "optional": true
+ },
+ {
+ "command": "FROMLONLAT",
+ "name": [
+ "longitude",
+ "latitude"
+ ],
+ "type": [
+ "double",
+ "double"
+ ],
+ "optional": true
+ },
+ {
+ "type": "block",
+ "name": "circle",
+ "block": [
+ {
+ "name": "radius",
+ "command": "BYRADIUS",
+ "type": "double"
+ },
+ {
+ "name": "unit",
+ "type": "enum",
+ "enum": [
+ "m",
+ "km",
+ "ft",
+ "mi"
+ ]
+ }
+ ],
+ "optional": true
+ },
+ {
+ "type": "block",
+ "name": "box",
+ "block": [
+ {
+ "name": "width",
+ "command": "BYBOX",
+ "type": "double"
+ },
+ {
+ "name": "height",
+ "type": "double"
+ },
+ {
+ "name": "unit",
+ "type": "enum",
+ "enum": [
+ "m",
+ "km",
+ "ft",
+ "mi"
+ ]
+ }
+ ],
+ "optional": true
+ },
+ {
+ "name": "order",
+ "type": "enum",
+ "enum": [
+ "ASC",
+ "DESC"
+ ],
+ "optional": true
+ },
+ {
+ "type": "block",
+ "name": "count",
+ "block": [
+ {
+ "name": "count",
+ "command": "COUNT",
+ "type": "integer"
+ },
+ {
+ "name": "any",
+ "type": "enum",
+ "enum": [
+ "ANY"
+ ],
+ "optional": true
+ }
+ ],
+ "optional": true
+ },
+ {
+ "name": "storedist",
+ "type": "enum",
+ "enum": [
+ "STOREDIST"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2",
+ "group": "geo"
+ },
"GET": {
"summary": "Get the value of a key",
"complexity": "O(1)",
@@ -1407,6 +1968,42 @@
}
],
"since": "2.2.0",
+ "group": "bitmap"
+ },
+ "GETDEL": {
+ "summary":"Get the value of a key and delete the key",
+ "complexity": "O(1)",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ }
+ ],
+ "since": "6.2.0",
+ "group": "string"
+ },
+ "GETEX": {
+ "summary": "Get the value of a key and optionally set its expiration",
+ "complexity": "O(1)",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "name": "expiration",
+ "type": "enum",
+ "enum": [
+ "EX seconds",
+ "PX milliseconds",
+ "EXAT timestamp",
+ "PXAT milliseconds-timestamp",
+ "PERSIST"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
"group": "string"
},
"GETRANGE": {
@@ -1463,30 +2060,37 @@
"group": "hash"
},
"HELLO": {
- "summary": "switch Redis protocol",
+ "summary": "Handshake with Redis",
"complexity": "O(1)",
"arguments": [
{
- "name": "protover",
- "type": "integer"
- },
- {
- "command": "AUTH",
- "name": [
- "username",
- "password"
- ],
- "type": [
- "string",
- "string"
+ "name": "arguments",
+ "type": "block",
+ "block": [
+ {
+ "name": "protover",
+ "type": "integer"
+ },
+ {
+ "command": "AUTH",
+ "name": [
+ "username",
+ "password"
+ ],
+ "type": [
+ "string",
+ "string"
+ ],
+ "optional": true
+ },
+ {
+ "command": "SETNAME",
+ "name": "clientname",
+ "type": "string",
+ "optional": true
+ }
],
"optional": true
- },
- {
- "command": "SETNAME",
- "name": "clientname",
- "type": "string",
- "optional": true
}
],
"since": "6.0.0",
@@ -1683,6 +2287,37 @@
"since": "2.0.0",
"group": "hash"
},
+ "HRANDFIELD": {
+ "summary": "Get one or multiple random fields from a hash",
+ "complexity": "O(N) where N is the number of fields returned",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "name": "options",
+ "type": "block",
+ "block": [
+ {
+ "name": "count",
+ "type": "integer"
+ },
+ {
+ "name": "withvalues",
+ "type": "enum",
+ "enum": [
+ "WITHVALUES"
+ ],
+ "optional": true
+ }
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "hash"
+ },
"HSTRLEN": {
"summary": "Get the length of the value of a hash field",
"complexity": "O(1)",
@@ -1854,17 +2489,56 @@
"group": "list"
},
"LPOP": {
- "summary": "Remove and get the first element in a list",
- "complexity": "O(1)",
+ "summary": "Remove and get the first elements in a list",
+ "complexity": "O(N) where N is the number of elements returned",
"arguments": [
{
"name": "key",
"type": "key"
+ },
+ {
+ "name": "count",
+ "type": "integer",
+ "optional": true
}
],
"since": "1.0.0",
"group": "list"
},
+ "LPOS": {
+ "summary": "Return the index of matching elements on a list",
+ "complexity": "O(N) where N is the number of elements in the list, for the average case. When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time.",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "name": "element",
+ "type": "string"
+ },
+ {
+ "command": "RANK",
+ "name": "rank",
+ "type": "integer",
+ "optional": true
+ },
+ {
+ "command": "COUNT",
+ "name": "num-matches",
+ "type": "integer",
+ "optional": true
+ },
+ {
+ "command": "MAXLEN",
+ "name": "len",
+ "type": "integer",
+ "optional": true
+ }
+ ],
+ "since": "6.0.6",
+ "group": "list"
+ },
"LPUSH": {
"summary": "Prepend one or multiple elements to a list",
"complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.",
@@ -2086,6 +2760,12 @@
"optional": true
},
{
+ "command": "AUTH2",
+ "name": "username password",
+ "type": "string",
+ "optional": true
+ },
+ {
"name": "key",
"command": "KEYS",
"type": "key",
@@ -2258,6 +2938,18 @@
"since": "2.6.0",
"group": "generic"
},
+ "PEXPIRETIME": {
+ "summary": "Get the expiration Unix timestamp for a key in milliseconds",
+ "complexity": "O(1)",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ }
+ ],
+ "since": "7.0.0",
+ "group": "generic"
+ },
"PFADD": {
"summary": "Adds the specified elements to the specified HyperLogLog.",
"complexity": "O(1) to add every element.",
@@ -2269,6 +2961,7 @@
{
"name": "element",
"type": "string",
+ "optional": true,
"multiple": true
}
],
@@ -2469,6 +3162,11 @@
"since": "1.0.0",
"group": "generic"
},
+ "RESET": {
+ "summary": "Reset the connection",
+ "since": "6.2",
+ "group": "connection"
+ },
"RESTORE": {
"summary": "Create a key using the provided serialized value, previously obtained using DUMP.",
"complexity": "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).",
@@ -2523,12 +3221,17 @@
"group": "server"
},
"RPOP": {
- "summary": "Remove and get the last element in a list",
- "complexity": "O(1)",
+ "summary": "Remove and get the last elements in a list",
+ "complexity": "O(N) where N is the number of elements returned",
"arguments": [
{
"name": "key",
"type": "key"
+ },
+ {
+ "name": "count",
+ "type": "integer",
+ "optional": true
}
],
"since": "1.0.0",
@@ -2550,6 +3253,38 @@
"since": "1.2.0",
"group": "list"
},
+ "LMOVE": {
+ "summary": "Pop an element from a list, push it to another list and return it",
+ "complexity": "O(1)",
+ "arguments": [
+ {
+ "name": "source",
+ "type": "key"
+ },
+ {
+ "name": "destination",
+ "type": "key"
+ },
+ {
+ "name": "wherefrom",
+ "type": "enum",
+ "enum": [
+ "LEFT",
+ "RIGHT"
+ ]
+ },
+ {
+ "name": "whereto",
+ "type": "enum",
+ "enum": [
+ "LEFT",
+ "RIGHT"
+ ]
+ }
+ ],
+ "since": "6.2.0",
+ "group": "list"
+ },
"RPUSH": {
"summary": "Append one or multiple elements to a list",
"complexity": "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments.",
@@ -2650,6 +3385,17 @@
},
"SCRIPT FLUSH": {
"summary": "Remove all the scripts from the script cache.",
+ "arguments": [
+ {
+ "name": "async",
+ "type": "enum",
+ "enum": [
+ "ASYNC",
+ "SYNC"
+ ],
+ "optional": true
+ }
+ ],
"complexity": "O(N) with N being the number of scripts in cache",
"since": "2.6.0",
"group": "scripting"
@@ -2730,7 +3476,10 @@
"type": "enum",
"enum": [
"EX seconds",
- "PX milliseconds"
+ "PX milliseconds",
+ "EXAT timestamp",
+ "PXAT milliseconds-timestamp",
+ "KEEPTTL"
],
"optional": true
},
@@ -2744,10 +3493,10 @@
"optional": true
},
{
- "name": "keepttl",
+ "name": "get",
"type": "enum",
"enum": [
- "KEEPTTL"
+ "GET"
],
"optional": true
}
@@ -2773,7 +3522,7 @@
}
],
"since": "2.2.0",
- "group": "string"
+ "group": "bitmap"
},
"SETEX": {
"summary": "Set the value and expiration of a key",
@@ -2893,6 +3642,23 @@
"since": "1.0.0",
"group": "set"
},
+ "SMISMEMBER": {
+ "summary": "Returns the membership associated with the given elements for a set",
+ "complexity": "O(N) where N is the number of elements being checked for membership",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "name": "member",
+ "type": "string",
+ "multiple": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "set"
+ },
"SLAVEOF": {
"summary": "Make the server a replica of another instance, or promote it as master. Deprecated starting with Redis 5. Use REPLICAOF instead.",
"arguments": [
@@ -3033,7 +3799,7 @@
},
"SPOP": {
"summary": "Remove and return one or multiple random members from a set",
- "complexity": "O(1)",
+ "complexity": "Without the count argument O(1), otherwise O(N) where N is the value of the passed count.",
"arguments": [
{
"name": "key",
@@ -3159,6 +3925,7 @@
},
"SWAPDB": {
"summary": "Swaps two Redis databases",
+ "complexity": "O(N) where N is the count of clients watching or blocking on keys from both databases.",
"arguments": [
{
"name": "index1",
@@ -3315,6 +4082,15 @@
"optional": true
},
{
+ "name": "comparison",
+ "type": "enum",
+ "enum": [
+ "GT",
+ "LT"
+ ],
+ "optional": true
+ },
+ {
"name": "change",
"type": "enum",
"enum": [
@@ -3377,6 +4153,52 @@
"since": "2.0.0",
"group": "sorted_set"
},
+ "ZDIFF": {
+ "summary": "Subtract multiple sorted sets",
+ "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.",
+ "arguments": [
+ {
+ "name": "numkeys",
+ "type": "integer"
+ },
+ {
+ "name": "key",
+ "type": "key",
+ "multiple": true
+ },
+ {
+ "name": "withscores",
+ "type": "enum",
+ "enum": [
+ "WITHSCORES"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
+ "ZDIFFSTORE": {
+ "summary": "Subtract multiple sorted sets and store the resulting sorted set in a new key",
+ "complexity": "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set.",
+ "arguments": [
+ {
+ "name": "destination",
+ "type": "key"
+ },
+ {
+ "name": "numkeys",
+ "type": "integer"
+ },
+ {
+ "name": "key",
+ "type": "key",
+ "multiple": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
"ZINCRBY": {
"summary": "Increment the score of a member in a sorted set",
"complexity": "O(log(N)) where N is the number of elements in the sorted set.",
@@ -3397,6 +4219,49 @@
"since": "1.2.0",
"group": "sorted_set"
},
+ "ZINTER": {
+ "summary": "Intersect multiple sorted sets",
+ "complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.",
+ "arguments": [
+ {
+ "name": "numkeys",
+ "type": "integer"
+ },
+ {
+ "name": "key",
+ "type": "key",
+ "multiple": true
+ },
+ {
+ "command": "WEIGHTS",
+ "name": "weight",
+ "type": "integer",
+ "variadic": true,
+ "optional": true
+ },
+ {
+ "command": "AGGREGATE",
+ "name": "aggregate",
+ "type": "enum",
+ "enum": [
+ "SUM",
+ "MIN",
+ "MAX"
+ ],
+ "optional": true
+ },
+ {
+ "name": "withscores",
+ "type": "enum",
+ "enum": [
+ "WITHSCORES"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
"ZINTERSTORE": {
"summary": "Intersect multiple sorted sets and store the resulting sorted set in a new key",
"complexity": "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set.",
@@ -3490,8 +4355,92 @@
"since": "5.0.0",
"group": "sorted_set"
},
+ "ZRANDMEMBER": {
+ "summary": "Get one or multiple random elements from a sorted set",
+ "complexity": "O(N) where N is the number of elements returned",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "name": "options",
+ "type": "block",
+ "block": [
+ {
+ "name": "count",
+ "type": "integer"
+ },
+ {
+ "name": "withscores",
+ "type": "enum",
+ "enum": [
+ "WITHSCORES"
+ ],
+ "optional": true
+ }
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
+ "ZRANGESTORE": {
+ "summary": "Store a range of members from sorted set into another key",
+ "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key.",
+ "arguments": [
+ {
+ "name": "dst",
+ "type": "key"
+ },
+ {
+ "name": "src",
+ "type": "key"
+ },
+ {
+ "name": "min",
+ "type": "string"
+ },
+ {
+ "name": "max",
+ "type": "string"
+ },
+ {
+ "name": "sortby",
+ "type": "enum",
+ "enum": [
+ "BYSCORE",
+ "BYLEX"
+ ],
+ "optional": true
+ },
+ {
+ "name": "rev",
+ "type": "enum",
+ "enum": [
+ "REV"
+ ],
+ "optional": true
+ },
+ {
+ "command": "LIMIT",
+ "name": [
+ "offset",
+ "count"
+ ],
+ "type": [
+ "integer",
+ "integer"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
"ZRANGE": {
- "summary": "Return a range of members in a sorted set, by index",
+ "summary": "Return a range of members in a sorted set",
"complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned.",
"arguments": [
{
@@ -3499,12 +4448,41 @@
"type": "key"
},
{
- "name": "start",
- "type": "integer"
+ "name": "min",
+ "type": "string"
},
{
- "name": "stop",
- "type": "integer"
+ "name": "max",
+ "type": "string"
+ },
+ {
+ "name": "sortby",
+ "type": "enum",
+ "enum": [
+ "BYSCORE",
+ "BYLEX"
+ ],
+ "optional": true
+ },
+ {
+ "name": "rev",
+ "type": "enum",
+ "enum": [
+ "REV"
+ ],
+ "optional": true
+ },
+ {
+ "command": "LIMIT",
+ "name": [
+ "offset",
+ "count"
+ ],
+ "type": [
+ "integer",
+ "integer"
+ ],
+ "optional": true
},
{
"name": "withscores",
@@ -3815,6 +4793,66 @@
"since": "1.2.0",
"group": "sorted_set"
},
+ "ZUNION": {
+ "summary": "Add multiple sorted sets",
+ "complexity": "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.",
+ "arguments": [
+ {
+ "name": "numkeys",
+ "type": "integer"
+ },
+ {
+ "name": "key",
+ "type": "key",
+ "multiple": true
+ },
+ {
+ "command": "WEIGHTS",
+ "name": "weight",
+ "type": "integer",
+ "variadic": true,
+ "optional": true
+ },
+ {
+ "command": "AGGREGATE",
+ "name": "aggregate",
+ "type": "enum",
+ "enum": [
+ "SUM",
+ "MIN",
+ "MAX"
+ ],
+ "optional": true
+ },
+ {
+ "name": "withscores",
+ "type": "enum",
+ "enum": [
+ "WITHSCORES"
+ ],
+ "optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
+ "ZMSCORE": {
+ "summary": "Get the score associated with the given members in a sorted set",
+ "complexity": "O(N) where N is the number of members being requested.",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
+ },
+ {
+ "name": "member",
+ "type": "string",
+ "multiple": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "sorted_set"
+ },
"ZUNIONSTORE": {
"summary": "Add multiple sorted sets and store the resulting sorted set in a new key",
"complexity": "O(N)+O(M log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set.",
@@ -4010,15 +5048,56 @@
},
"XADD": {
"summary": "Appends a new entry to a stream",
- "complexity": "O(1)",
+ "complexity": "O(1) when adding a new entry, O(N) when trimming where N being the number of entires evicted.",
"arguments": [
{
"name": "key",
"type": "key"
},
{
- "name": "ID",
- "type": "string"
+ "command": "NOMKSTREAM",
+ "optional": true
+ },
+ {
+ "name": "trim",
+ "type": "block",
+ "optional": true,
+ "block": [
+ {
+ "name": "strategy",
+ "type": "enum",
+ "enum": [
+ "MAXLEN",
+ "MINID"
+ ]
+ },
+ {
+ "name": "operator",
+ "type": "enum",
+ "enum": [
+ "=",
+ "~"
+ ],
+ "optional": true
+ },
+ {
+ "name": "threshold",
+ "type": "string"
+ },
+ {
+ "command": "LIMIT",
+ "name": "count",
+ "type": "integer",
+ "optional": true
+ }
+ ]
+ },
+ {
+ "type": "enum",
+ "enum": [
+ "*",
+ "ID"
+ ]
},
{
"name": [
@@ -4044,23 +5123,37 @@
"type": "key"
},
{
- "name": "strategy",
- "type": "enum",
- "enum": [
- "MAXLEN"
+ "name": "trim",
+ "type": "block",
+ "block": [
+ {
+ "name": "strategy",
+ "type": "enum",
+ "enum": [
+ "MAXLEN",
+ "MINID"
+ ]
+ },
+ {
+ "name": "operator",
+ "type": "enum",
+ "enum": [
+ "=",
+ "~"
+ ],
+ "optional": true
+ },
+ {
+ "name": "threshold",
+ "type": "string"
+ },
+ {
+ "command": "LIMIT",
+ "name": "count",
+ "type": "integer",
+ "optional": true
+ }
]
- },
- {
- "name": "approx",
- "type": "enum",
- "enum": [
- "~"
- ],
- "optional": true
- },
- {
- "name": "count",
- "type": "integer"
}
],
"since": "5.0.0",
@@ -4136,7 +5229,7 @@
"group": "stream"
},
"XLEN": {
- "summary": "Return the number of entires in a stream",
+ "summary": "Return the number of entries in a stream",
"complexity": "O(1)",
"arguments": [
{
@@ -4176,7 +5269,7 @@
"multiple": true
},
{
- "name": "id",
+ "name": "ID",
"type": "string",
"multiple": true
}
@@ -4189,41 +5282,83 @@
"complexity": "O(1) for all the subcommands, with the exception of the DESTROY subcommand which takes an additional O(M) time in order to delete the M entries inside the consumer group pending entries list (PEL).",
"arguments": [
{
- "command": "CREATE",
- "name": [
- "key",
- "groupname",
- "id-or-$"
+ "name": "create",
+ "type": "block",
+ "block": [
+ {
+ "command": "CREATE",
+ "name": [
+ "key",
+ "groupname"
+ ],
+ "type": [
+ "key",
+ "string"
+ ]
+ },
+ {
+ "name": "id",
+ "type": "enum",
+ "enum": [
+ "ID",
+ "$"
+ ]
+ },
+ {
+ "command": "MKSTREAM",
+ "optional": true
+ }
],
- "type": [
- "key",
- "string",
- "string"
+ "optional": true
+ },
+ {
+ "name": "setid",
+ "type": "block",
+ "block": [
+ {
+ "command": "SETID",
+ "name": [
+ "key",
+ "groupname"
+ ],
+ "type": [
+ "key",
+ "string"
+ ]
+ },
+ {
+ "name": "id",
+ "type": "enum",
+ "enum": [
+ "ID",
+ "$"
+ ]
+ }
],
"optional": true
},
{
- "command": "SETID",
+ "command": "DESTROY",
"name": [
"key",
- "groupname",
- "id-or-$"
+ "groupname"
],
"type": [
"key",
- "string",
"string"
],
"optional": true
},
{
- "command": "DESTROY",
+ "command": "CREATECONSUMER",
"name": [
"key",
- "groupname"
+ "groupname",
+ "consumername"
],
"type": [
"key",
+ "string",
"string"
],
"optional": true
@@ -4384,9 +5519,9 @@
"since": "5.0.0",
"group": "stream"
},
- "XPENDING": {
- "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.",
- "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). When the command returns just the summary it runs in O(1) time assuming the list of consumers is small, otherwise there is additional O(N) time needed to iterate every consumer.",
+ "XAUTOCLAIM": {
+ "summary": "Changes (or acquires) ownership of messages in a consumer group, as if the messages were delivered to the specified consumer.",
+ "complexity": "O(1) if COUNT is small.",
"arguments": [
{
"name": "key",
@@ -4397,21 +5532,74 @@
"type": "string"
},
{
- "name": [
- "start",
- "end",
- "count"
- ],
- "type": [
- "string",
- "string",
- "integer"
+ "name": "consumer",
+ "type": "string"
+ },
+ {
+ "name": "min-idle-time",
+ "type": "string"
+ },
+ {
+ "name": "start",
+ "type": "string"
+ },
+ {
+ "command": "COUNT",
+ "name": "count",
+ "type": "integer",
+ "optional": true
+ },
+ {
+ "name": "justid",
+ "enum": [
+ "JUSTID"
],
"optional": true
+ }
+ ],
+ "since": "6.2.0",
+ "group": "stream"
+ },
+ "XPENDING": {
+ "summary": "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.",
+ "complexity": "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer.",
+ "arguments": [
+ {
+ "name": "key",
+ "type": "key"
},
{
- "name": "consumer",
- "type": "string",
+ "name": "group",
+ "type": "string"
+ },
+ {
+ "type": "block",
+ "name": "filters",
+ "block": [
+ {
+ "command": "IDLE",
+ "name": "min-idle-time",
+ "type": "integer",
+ "optional": true
+ },
+ {
+ "name": "start",
+ "type": "string"
+ },
+ {
+ "name": "end",
+ "type": "string"
+ },
+ {
+ "name": "count",
+ "type": "integer"
+ },
+ {
+ "name": "consumer",
+ "type": "string",
+ "optional": true
+ }
+ ],
"optional": true
}
],
@@ -4456,7 +5644,8 @@
{
"name": "event",
"type": "string",
- "optional": true
+ "optional": true,
+ "multiple": true
}
],
"since": "2.8.13",
diff --git a/iredis/data/commands/acl-getuser.md b/iredis/data/commands/acl-getuser.md
index a3bebe3..4f6ae57 100644
--- a/iredis/data/commands/acl-getuser.md
+++ b/iredis/data/commands/acl-getuser.md
@@ -8,6 +8,10 @@ rules used to configure the user, it is still functionally identical.
@array-reply: a list of ACL rule definitions for the user.
+@history
+
+- `>= 6.2`: Added Pub/Sub channel patterns.
+
@examples
Here's the default configuration for the default user:
@@ -25,4 +29,6 @@ Here's the default configuration for the default user:
6) "+@all"
7) "keys"
8) 1) "*"
+9) "channels"
+10) 1) "*"
```
diff --git a/iredis/data/commands/acl-list.md b/iredis/data/commands/acl-list.md
index ebfa36b..6d6be4f 100644
--- a/iredis/data/commands/acl-list.md
+++ b/iredis/data/commands/acl-list.md
@@ -12,6 +12,6 @@ An array of strings.
```
> ACL LIST
-1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* +@all -@admin -@dangerous"
-2) "user default on nopass ~* +@all"
+1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* &* +@all -@admin -@dangerous"
+2) "user default on nopass ~* &* +@all"
```
diff --git a/iredis/data/commands/acl-setuser.md b/iredis/data/commands/acl-setuser.md
index 476d178..4dca6d1 100644
--- a/iredis/data/commands/acl-setuser.md
+++ b/iredis/data/commands/acl-setuser.md
@@ -55,10 +55,17 @@ This is a list of all the supported Redis ACL rules:
deleted user to be disconnected.
- `~<pattern>`: add the specified key pattern (glob style pattern, like in the
`KEYS` command), to the list of key patterns accessible by the user. You can
- add as many key patterns you want to the same user. Example: `~objects:*`
+ add multiple key patterns to the same user. Example: `~objects:*`
- `allkeys`: alias for `~*`, it allows the user to access all the keys.
-- `resetkey`: removes all the key patterns from the list of key patterns the
+- `resetkeys`: removes all the key patterns from the list of key patterns the
user can access.
+- `&<pattern>`: add the specified glob style pattern to the list of Pub/Sub
+ channel patterns accessible by the user. You can add multiple channel patterns
+ to the same user. Example: `&chatroom:*`
+- `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub
+ channels.
+- `resetchannels`: removes all channel patterns from the list of Pub/Sub channel
+ patterns the user can access.
- `+<command>`: add this command to the list of the commands the user can call.
Example: `+zadd`.
- `+@<category>`: add all the commands in the specified category to the list of
@@ -87,7 +94,7 @@ This is a list of all the supported Redis ACL rules:
- `>password`: Add the specified clear text password as an hashed password in
the list of the users passwords. Every user can have many active passwords, so
that password rotation will be simpler. The specified password is not stored
- in cleartext inside the server. Example: `>mypassword`.
+ as clear text inside the server. Example: `>mypassword`.
- `#<hashedpassword>`: Add the specified hashed password to the list of user
passwords. A Redis hashed password is hashed with SHA256 and translated into a
hexadecimal string. Example:
@@ -104,6 +111,10 @@ This is a list of all the supported Redis ACL rules:
If the rules contain errors, the error is returned.
+@history
+
+- `>= 6.2`: Added Pub/Sub channel patterns.
+
@examples
```
diff --git a/iredis/data/commands/auth.md b/iredis/data/commands/auth.md
index 4b75171..2e93f70 100644
--- a/iredis/data/commands/auth.md
+++ b/iredis/data/commands/auth.md
@@ -29,6 +29,10 @@ defined in the ACL list (see `ACL SETUSER`) and the official
When ACLs are used, the single argument form of the command, where only the
password is specified, assumes that the implicit username is "default".
+@history
+
+- `>= 6.0.0`: Added ACL style (username and password).
+
## Security notice
Because of the high performance nature of Redis, it is possible to try a lot of
diff --git a/iredis/data/commands/bitfield.md b/iredis/data/commands/bitfield.md
index 80563fa..6d10d93 100644
--- a/iredis/data/commands/bitfield.md
+++ b/iredis/data/commands/bitfield.md
@@ -20,11 +20,11 @@ offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0:
Note that:
-1. Addressing with `GET` bits outside the current string length (including the
+1. Addressing with `!GET` bits outside the current string length (including the
case the key does not exist at all), results in the operation to be performed
like the missing part all consists of bits set to 0.
-2. Addressing with `SET` or `INCRBY` bits outside the current string length will
- enlarge the string, zero-padding it, as needed, for the minimal length
+2. Addressing with `!SET` or `!INCRBY` bits outside the current string length
+ will enlarge the string, zero-padding it, as needed, for the minimal length
needed, according to the most far bit touched.
## Supported subcommands and integer types
@@ -39,7 +39,7 @@ The following is the list of supported commands.
value.
There is another subcommand that only changes the behavior of successive
-`INCRBY` subcommand calls by setting the overflow behavior:
+`!INCRBY` and `!SET` subcommands calls by setting the overflow behavior:
- **OVERFLOW** `[WRAP|SAT|FAIL]`
@@ -91,8 +91,9 @@ following behaviors:
detected. The corresponding return value is set to NULL to signal the
condition to the caller.
-Note that each `OVERFLOW` statement only affects the `INCRBY` commands that
-follow it in the list of subcommands, up to the next `OVERFLOW` statement.
+Note that each `OVERFLOW` statement only affects the `!INCRBY` and `!SET`
+commands that follow it in the list of subcommands, up to the next `OVERFLOW`
+statement.
By default, **WRAP** is used if not otherwise specified.
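+
+For illustration only, a minimal sketch of how an `OVERFLOW` statement scopes to
+the `!INCRBY` calls that follow it (assuming `mykey` does not exist yet, so the
+counter starts at 0):
+
+```
+> BITFIELD mykey OVERFLOW SAT INCRBY u8 0 200 OVERFLOW WRAP INCRBY u8 0 200
+1) (integer) 200
+2) (integer) 144
+```
+
+The first increment runs under `SAT` and yields 200; the second runs under
+`WRAP`, so 400 wraps around to 144.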
diff --git a/iredis/data/commands/blmove.md b/iredis/data/commands/blmove.md
new file mode 100644
index 0000000..3edf4e5
--- /dev/null
+++ b/iredis/data/commands/blmove.md
@@ -0,0 +1,23 @@
+`BLMOVE` is the blocking variant of `LMOVE`. When `source` contains elements,
+this command behaves exactly like `LMOVE`. When used inside a `MULTI`/`EXEC`
+block, this command behaves exactly like `LMOVE`. When `source` is empty, Redis
+will block the connection until another client pushes to it or until `timeout`
+is reached. A `timeout` of zero can be used to block indefinitely.
+
+This command comes in place of the now deprecated `BRPOPLPUSH`. Doing
+`BLMOVE RIGHT LEFT` is equivalent.
+
+See `LMOVE` for more information.
+
+@return
+
+@bulk-string-reply: the element being popped from `source` and pushed to
+`destination`. If `timeout` is reached, a @nil-reply is returned.
+
+## Pattern: Reliable queue
+
+Please see the pattern description in the `LMOVE` documentation.
+
+## Pattern: Circular list
+
+Please see the pattern description in the `LMOVE` documentation.
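+
+For illustration only, a minimal sketch (the key names are arbitrary; because
+`mylist` is non-empty, the call returns immediately instead of blocking):
+
+```
+> RPUSH mylist "one" "two" "three"
+(integer) 3
+> BLMOVE mylist myotherlist LEFT RIGHT 0
+"one"
+> LRANGE myotherlist 0 -1
+1) "one"
+```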
diff --git a/iredis/data/commands/blpop.md b/iredis/data/commands/blpop.md
index b48ace7..e44037e 100644
--- a/iredis/data/commands/blpop.md
+++ b/iredis/data/commands/blpop.md
@@ -34,7 +34,7 @@ client will unblock returning a `nil` multi-bulk value when the specified
timeout has expired without a push operation against at least one of the
specified keys.
-**The timeout argument is interpreted as an integer value specifying the maximum
+**The timeout argument is interpreted as a double value specifying the maximum
number of seconds to block**. A timeout of zero can be used to block
indefinitely.
@@ -129,6 +129,10 @@ If you like science fiction, think of time flowing at infinite speed inside a
where an element was popped and the second element being the value of the
popped element.
+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
@examples
```
diff --git a/iredis/data/commands/brpop.md b/iredis/data/commands/brpop.md
index e0bb650..ca7df79 100644
--- a/iredis/data/commands/brpop.md
+++ b/iredis/data/commands/brpop.md
@@ -18,6 +18,10 @@ the tail of a list instead of popping from the head.
where an element was popped and the second element being the value of the
popped element.
+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
@examples
```
diff --git a/iredis/data/commands/brpoplpush.md b/iredis/data/commands/brpoplpush.md
index 1c3a9b3..b37183f 100644
--- a/iredis/data/commands/brpoplpush.md
+++ b/iredis/data/commands/brpoplpush.md
@@ -5,6 +5,9 @@ elements, this command behaves exactly like `RPOPLPUSH`. When used inside a
to it or until `timeout` is reached. A `timeout` of zero can be used to block
indefinitely.
+As of Redis 6.2.0, BRPOPLPUSH is considered deprecated. Please prefer `BLMOVE`
+in new code.
+
See `RPOPLPUSH` for more information.
@return
@@ -12,6 +15,10 @@ See `RPOPLPUSH` for more information.
@bulk-string-reply: the element being popped from `source` and pushed to
`destination`. If `timeout` is reached, a @nil-reply is returned.
+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
## Pattern: Reliable queue
Please see the pattern description in the `RPOPLPUSH` documentation.
diff --git a/iredis/data/commands/bzpopmax.md b/iredis/data/commands/bzpopmax.md
index a061b0e..6ab6543 100644
--- a/iredis/data/commands/bzpopmax.md
+++ b/iredis/data/commands/bzpopmax.md
@@ -5,7 +5,7 @@ members to pop from any of the given sorted sets. A member with the highest
score is popped from first sorted set that is non-empty, with the given keys
being checked in the order that they are given.
-The `timeout` argument is interpreted as an integer value specifying the maximum
+The `timeout` argument is interpreted as a double value specifying the maximum
number of seconds to block. A timeout of zero can be used to block indefinitely.
See the [BZPOPMIN documentation][cb] for the exact semantics, since `BZPOPMAX`
@@ -23,6 +23,10 @@ with the highest scores instead of popping the ones with the lowest scores.
where a member was popped, the second element is the popped member itself, and
the third element is the score of the popped element.
+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
@examples
```
diff --git a/iredis/data/commands/bzpopmin.md b/iredis/data/commands/bzpopmin.md
index 118a821..5541726 100644
--- a/iredis/data/commands/bzpopmin.md
+++ b/iredis/data/commands/bzpopmin.md
@@ -5,7 +5,7 @@ members to pop from any of the given sorted sets. A member with the lowest score
is popped from first sorted set that is non-empty, with the given keys being
checked in the order that they are given.
-The `timeout` argument is interpreted as an integer value specifying the maximum
+The `timeout` argument is interpreted as a double value specifying the maximum
number of seconds to block. A timeout of zero can be used to block indefinitely.
See the [BLPOP documentation][cl] for the exact semantics, since `BZPOPMIN` is
@@ -23,6 +23,10 @@ popped from.
where a member was popped, the second element is the popped member itself, and
the third element is the score of the popped element.
+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
@examples
```
diff --git a/iredis/data/commands/client-caching.md b/iredis/data/commands/client-caching.md
index 7bbb439..13346e3 100644
--- a/iredis/data/commands/client-caching.md
+++ b/iredis/data/commands/client-caching.md
@@ -1,7 +1,7 @@
This command controls the tracking of the keys in the next command executed by
the connection, when tracking is enabled in `OPTIN` or `OPTOUT` mode. Please
check the [client side caching documentation](/topics/client-side-caching) for
-background informations.
+background information.
When tracking is enabled Redis, using the `CLIENT TRACKING` command, it is
possible to specify the `OPTIN` or `OPTOUT` options, so that keys in read only
diff --git a/iredis/data/commands/client-info.md b/iredis/data/commands/client-info.md
new file mode 100644
index 0000000..ab64543
--- /dev/null
+++ b/iredis/data/commands/client-info.md
@@ -0,0 +1,16 @@
+The command returns information and statistics about the current client
+connection in a mostly human readable format.
+
+The reply format is identical to that of `CLIENT LIST`, and the content consists
+only of information about the current client.
+
+@examples
+
+```cli
+CLIENT INFO
+```
+
+@return
+
+@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for
+the current client.
diff --git a/iredis/data/commands/client-kill.md b/iredis/data/commands/client-kill.md
index 9cdb054..1b47313 100644
--- a/iredis/data/commands/client-kill.md
+++ b/iredis/data/commands/client-kill.md
@@ -1,14 +1,12 @@
-The `CLIENT KILL` command closes a given client connection. Up to Redis 2.8.11
-it was possible to close a connection only by client address, using the
-following form:
+The `CLIENT KILL` command closes a given client connection. This command
+supports two formats, the old format:
CLIENT KILL addr:port
The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr`
field).
-However starting with Redis 2.8.12 or greater, the command accepts the following
-form:
+The new format:
CLIENT KILL <filter> <value> ... ... <filter> <value>
@@ -17,13 +15,14 @@ of killing just by address. The following filters are available:
- `CLIENT KILL ADDR ip:port`. This is exactly the same as the old
three-arguments behavior.
-- `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field,
- which was introduced in the `CLIENT LIST` command starting from Redis 2.8.12.
-- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `slave`
- and `pubsub` (the `master` type is available from v3.2). This closes the
- connections of **all the clients** in the specified class. Note that clients
- blocked into the `MONITOR` command are considered to belong to the `normal`
- class.
+- `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local
+ (bind) address.
+- `CLIENT KILL ID client-id`. Allows killing a client by its unique `ID` field.
+ Client `ID`s are retrieved using the `CLIENT LIST` command.
+- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `replica`
+ and `pubsub`. This closes the connections of **all the clients** in the
+ specified class. Note that clients blocked into the `MONITOR` command are
+ considered to belong to the `normal` class.
- `CLIENT KILL USER username`. Closes all the connections that are authenticated
with the specified [ACL](/topics/acl) username, however it returns an error if
the username does not map to an existing ACL user.
@@ -32,10 +31,6 @@ of killing just by address. The following filters are available:
option to `no` will have the effect of also killing the client calling the
command.
-**Note: starting with Redis 5 the project is no longer using the slave word. You
-can use `TYPE replica` instead, however the old form is still supported for
-backward compatibility.**
-
It is possible to provide multiple filters at the same time. The command will
handle multiple filters via logical AND. For example:
@@ -71,3 +66,12 @@ When called with the three arguments format:
When called with the filter / value format:
@integer-reply: the number of clients killed.
+
+@history
+
+- `>= 2.8.12`: Added new filter format.
+- `>= 2.8.12`: `ID` option.
+- `>= 3.2`: Added the `master` type for the `TYPE` option.
+- `>= 5`: Replaced `slave` `TYPE` with `replica`. `slave` still supported for
+ backward compatibility.
+- `>= 6.2`: `LADDR` option.
diff --git a/iredis/data/commands/client-list.md b/iredis/data/commands/client-list.md
index ea0b775..7956f2b 100644
--- a/iredis/data/commands/client-list.md
+++ b/iredis/data/commands/client-list.md
@@ -1,10 +1,13 @@
The `CLIENT LIST` command returns information and statistics about the client
connections server in a mostly human readable format.
-As of v5.0, the optional `TYPE type` subcommand can be used to filter the list
-by clients' type, where _type_ is one of `normal`, `master`, `replica` and
-`pubsub`. Note that clients blocked into the `MONITOR` command are considered to
-belong to the `normal` class.
+You can use one of the optional subcommands to filter the list. The `TYPE type`
+subcommand filters the list by clients' type, where _type_ is one of `normal`,
+`master`, `replica`, and `pubsub`. Note that clients blocked by the `MONITOR`
+command belong to the `normal` class.
+
+The `ID` filter only returns entries for clients with IDs matching the
+`client-id` arguments.
@return
@@ -16,9 +19,10 @@ belong to the `normal` class.
Here is the meaning of the fields:
-- `id`: an unique 64-bit client ID (introduced in Redis 2.8.12).
+- `id`: a unique 64-bit client ID.
- `name`: the name set by the client with `CLIENT SETNAME`
- `addr`: address/port of the client
+- `laddr`: address/port of local address client connected to (bind address)
- `fd`: file descriptor corresponding to the socket
- `age`: total duration of the connection in seconds
- `idle`: idle time of the connection in seconds
@@ -35,6 +39,11 @@ Here is the meaning of the fields:
- `omem`: output buffer memory usage
- `events`: file descriptor events (see below)
- `cmd`: last command played
+- `argv-mem`: incomplete arguments for the next command (already extracted from
+ query buffer)
+- `tot-mem`: total memory consumed by this client in its various buffers
+- `redir`: client id of current client tracking redirection
+- `user`: the authenticated username of the client
The client flags can be a combination of:
@@ -53,6 +62,9 @@ S: the client is a replica node connection to this instance
u: the client is unblocked
U: the client is connected via a Unix domain socket
x: the client is in a MULTI/EXEC context
+t: the client enabled keys tracking in order to perform client side caching
+R: the client tracking target client is invalid
+B: the client enabled broadcast tracking mode
```
The file descriptor events can be:
@@ -68,3 +80,9 @@ New fields are regularly added for debugging purpose. Some could be removed in
the future. A version safe Redis client using this command should parse the
output accordingly (i.e. handling gracefully missing fields, skipping unknown
fields).
+
+@history
+
+- `>= 2.8.12`: Added unique client `id` field.
+- `>= 5.0`: Added optional `TYPE` filter.
+- `>= 6.2`: Added `laddr` field and the optional `ID` filter.
diff --git a/iredis/data/commands/client-pause.md b/iredis/data/commands/client-pause.md
index bdbdda0..3de6beb 100644
--- a/iredis/data/commands/client-pause.md
+++ b/iredis/data/commands/client-pause.md
@@ -3,14 +3,28 @@ clients for the specified amount of time (in milliseconds).
The command performs the following actions:
-- It stops processing all the pending commands from normal and pub/sub clients.
- However interactions with replicas will continue normally.
+- It stops processing all the pending commands from normal and pub/sub clients
+ for the given mode. However, interactions with replicas will continue normally.
+ Note that clients are formally paused when they try to execute a command, so
+ no work is performed on the server side for inactive clients.
- However it returns OK to the caller ASAP, so the `CLIENT PAUSE` command
execution is not paused by itself.
- When the specified amount of time has elapsed, all the clients are unblocked:
this will trigger the processing of all the commands accumulated in the query
buffer of every client during the pause.
+Client pause currently supports two modes:
+
+- `ALL`: This is the default mode. All client commands are blocked.
+- `WRITE`: Clients are only blocked if they attempt to execute a write command.
+
+For the `WRITE` mode, some commands have special behavior:
+
+- `EVAL`/`EVALSHA`: Will block client for all scripts.
+- `PUBLISH`: Will block client.
+- `PFCOUNT`: Will block client.
+- `WAIT`: Acknowledgements will be delayed, so this command will appear blocked.
+
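+For illustration only, pausing write commands for five seconds and then lifting
+the pause early with `CLIENT UNPAUSE` (the timeout value is arbitrary):
+
+```
+> CLIENT PAUSE 5000 WRITE
+OK
+> CLIENT UNPAUSE
+OK
+```
+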
This command is useful as it makes it possible to switch clients from a Redis instance
to another one in a controlled way. For example during an instance upgrade the
system administrator could do the following:
@@ -21,11 +35,16 @@ system administrator could do the following:
- Turn one of the replicas into a master.
- Reconfigure clients to connect with the new master.
-It is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the
-`INFO replication` command in order to get the current master offset at the time
-the clients are blocked. This way it is possible to wait for a specific offset
-in the replica side in order to make sure all the replication stream was
-processed.
+Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode
+will stop all replication traffic, can be aborted with the `CLIENT UNPAUSE`
+command, and allows reconfiguring the old master without risking accepting
+writes after the failover. This is also the mode used during cluster failover.
+
+For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC
+block together with the `INFO replication` command in order to get the current
+master offset at the time the clients are blocked. This way it is possible to
+wait for a specific offset in the replica side in order to make sure all the
+replication stream was processed.
Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or
expired during the time clients are paused. This way the dataset is guaranteed
@@ -36,3 +55,8 @@ but also from the point of view of internal operations.
@simple-string-reply: The command returns OK or an error if the timeout is
invalid.
+
+@history
+
+- `>= 3.2.10`: Client pause also prevents key eviction and expiration.
+- `>= 6.2`: CLIENT PAUSE WRITE mode added along with the `mode` option.
diff --git a/iredis/data/commands/client-tracking.md b/iredis/data/commands/client-tracking.md
index 197c0aa..4dc379b 100644
--- a/iredis/data/commands/client-tracking.md
+++ b/iredis/data/commands/client-tracking.md
@@ -37,7 +37,9 @@ when enabling tracking:
notifications will be provided only for keys starting with this string. This
option can be given multiple times to register multiple prefixes. If
broadcasting is enabled without this option, Redis will send notifications for
- every key.
+ every key. You can't delete a single prefix, but you can delete all prefixes
+ by disabling and re-enabling tracking. Using this option adds the additional
+ time complexity of O(N^2), where N is the total number of prefixes tracked.
- `OPTIN`: when broadcasting is NOT active, normally don't track keys in read
only commands, unless they are called immediately after a `CLIENT CACHING yes`
command.
diff --git a/iredis/data/commands/client-trackinginfo.md b/iredis/data/commands/client-trackinginfo.md
new file mode 100644
index 0000000..55dc577
--- /dev/null
+++ b/iredis/data/commands/client-trackinginfo.md
@@ -0,0 +1,25 @@
+The command returns information about the current client connection's use of the
+[server assisted client side caching](/topics/client-side-caching) feature.
+
+@return
+
+@array-reply: a list of tracking information sections and their respective
+values, specifically:
+
+- **flags**: A list of tracking flags used by the connection. The flags and
+ their meanings are as follows:
+ - `off`: The connection isn't using server assisted client side caching.
+ - `on`: Server assisted client side caching is enabled for the connection.
+ - `bcast`: The client uses broadcasting mode.
+ - `optin`: The client does not cache keys by default.
+ - `optout`: The client caches keys by default.
+ - `caching-yes`: The next command will cache keys (exists only together with
+ `optin`).
+ - `caching-no`: The next command won't cache keys (exists only together with
+ `optout`).
+ - `noloop`: The client isn't notified about keys modified by itself.
+ - `broken_redirect`: The client ID used for redirection isn't valid anymore.
+- **redirect**: The client ID used for notifications redirection, or -1 when
+ none.
+- **prefixes**: A list of key prefixes for which notifications are sent to the
+ client.
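+
+For illustration only, on a connection that has not enabled tracking the reply
+could look like the following (the exact values depend on the connection's
+tracking state):
+
+```
+> CLIENT TRACKINGINFO
+1) "flags"
+2) 1) "off"
+3) "redirect"
+4) (integer) -1
+5) "prefixes"
+6) (empty array)
+```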
diff --git a/iredis/data/commands/client-unpause.md b/iredis/data/commands/client-unpause.md
new file mode 100644
index 0000000..7d8dbb7
--- /dev/null
+++ b/iredis/data/commands/client-unpause.md
@@ -0,0 +1,6 @@
+`CLIENT UNPAUSE` is used to resume command processing for all clients that were
+paused by `CLIENT PAUSE`.
+
+@return
+
+@simple-string-reply: The command returns `OK`
diff --git a/iredis/data/commands/cluster-addslots.md b/iredis/data/commands/cluster-addslots.md
index f5668e5..b7b777a 100644
--- a/iredis/data/commands/cluster-addslots.md
+++ b/iredis/data/commands/cluster-addslots.md
@@ -46,7 +46,7 @@ bound with another node, or if the configuration epoch of the node advertising
the new hash slot, is greater than the node currently listed in the table.
This means that this command should be used with care only by applications
-orchestrating Redis Cluster, like `redis-trib`, and the command if used out of
+orchestrating Redis Cluster, like `redis-cli`, and the command if used out of
the right context can leave the cluster in a wrong state or cause data loss.
@return
diff --git a/iredis/data/commands/cluster-delslots.md b/iredis/data/commands/cluster-delslots.md
index 1924158..bd41bd7 100644
--- a/iredis/data/commands/cluster-delslots.md
+++ b/iredis/data/commands/cluster-delslots.md
@@ -38,7 +38,7 @@ node receiving the command:
This command only works in cluster mode and may be useful for debugging and in
order to manually orchestrate a cluster configuration when a new cluster is
-created. It is currently not used by `redis-trib`, and mainly exists for API
+created. It is currently not used by `redis-cli`, and mainly exists for API
completeness.
@return
diff --git a/iredis/data/commands/cluster-flushslots.md b/iredis/data/commands/cluster-flushslots.md
index 74204d1..8974a1d 100644
--- a/iredis/data/commands/cluster-flushslots.md
+++ b/iredis/data/commands/cluster-flushslots.md
@@ -3,6 +3,6 @@ Deletes all slots from a node.
The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected
node. It can only be called when the database is empty.
-@reply
+@return
@simple-string-reply: `OK`
diff --git a/iredis/data/commands/cluster-nodes.md b/iredis/data/commands/cluster-nodes.md
index 435ce2b..0bdbbba 100644
--- a/iredis/data/commands/cluster-nodes.md
+++ b/iredis/data/commands/cluster-nodes.md
@@ -11,8 +11,8 @@ additional info appended at the end).
Note that normally clients willing to fetch the map between Cluster hash slots
and node addresses should use `CLUSTER SLOTS` instead. `CLUSTER NODES`, that
provides more information, should be used for administrative tasks, debugging,
-and configuration inspections. It is also used by `redis-trib` in order to
-manage a cluster.
+and configuration inspections. It is also used by `redis-cli` in order to manage
+a cluster.
## Serialization format
@@ -41,8 +41,8 @@ The meaning of each field is the following:
2. `ip:port@cport`: The node address where clients should contact the node to
run queries.
3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`,
- `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in
- detail in the next section.
+ `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are
+ explained in detail in the next section.
4. `master`: If the node is a replica, and the master is known, the master node
ID, otherwise the "-" character.
5. `ping-sent`: Milliseconds unix time at which the currently active ping was
@@ -74,6 +74,7 @@ Meaning of the flags (field number 3):
promoted the `PFAIL` state to `FAIL`.
- `handshake`: Untrusted node, we are handshaking.
- `noaddr`: No address known for this node.
+- `nofailover`: Replica will not try to failover.
- `noflags`: No flags at all.
## Notes on published config epochs
diff --git a/iredis/data/commands/cluster-setslot.md b/iredis/data/commands/cluster-setslot.md
index 6e5ecf3..213acba 100644
--- a/iredis/data/commands/cluster-setslot.md
+++ b/iredis/data/commands/cluster-setslot.md
@@ -65,9 +65,10 @@ already migrated to the target node are executed in the target node, so that:
## CLUSTER SETSLOT `<slot>` STABLE
This subcommand just clears migrating / importing state from the slot. It is
-mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`.
-Normally the two states are cleared automatically at the end of the migration
-using the `SETSLOT ... NODE ...` subcommand as explained in the next section.
+mainly used to fix a cluster stuck in a wrong state by
+`redis-cli --cluster fix`. Normally the two states are cleared automatically at
+the end of the migration using the `SETSLOT ... NODE ...` subcommand as
+explained in the next section.
## CLUSTER SETSLOT `<slot>` NODE `<node-id>`
diff --git a/iredis/data/commands/command.md b/iredis/data/commands/command.md
index 5028e70..2d2b1fc 100644
--- a/iredis/data/commands/command.md
+++ b/iredis/data/commands/command.md
@@ -73,7 +73,7 @@ Command flags is @array-reply containing one or more status replies:
- _write_ - command may result in modifications
- _readonly_ - command will never modify keys
-- _denyoom_ - reject command if currently OOM
+- _denyoom_ - reject command if currently out of memory
- _admin_ - server admin command
- _pubsub_ - pubsub-related command
- _noscript_ - deny this command from scripts
@@ -109,8 +109,12 @@ relevant key positions.
Complete list of commands currently requiring key location parsing:
- `SORT` - optional `STORE` key, optional `BY` weights, optional `GET` keys
+- `ZUNION` - keys stop when `WEIGHT` or `AGGREGATE` starts
- `ZUNIONSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
+- `ZINTER` - keys stop when `WEIGHT` or `AGGREGATE` starts
- `ZINTERSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
+- `ZDIFF` - keys stop after `numkeys` count arguments
+- `ZDIFFSTORE` - keys stop after `numkeys` count arguments
- `EVAL` - keys stop after `numkeys` count arguments
- `EVALSHA` - keys stop after `numkeys` count arguments
diff --git a/iredis/data/commands/config-set.md b/iredis/data/commands/config-set.md
index 24c4194..dba09c2 100644
--- a/iredis/data/commands/config-set.md
+++ b/iredis/data/commands/config-set.md
@@ -13,7 +13,7 @@ All the supported parameters have the same meaning of the equivalent
configuration parameter used in the [redis.conf][hgcarr22rc] file, with the
following important differences:
-[hgcarr22rc]: http://github.com/redis/redis/raw/2.8/redis.conf
+[hgcarr22rc]: http://github.com/redis/redis/raw/6.0/redis.conf
- In options where bytes or other quantities are specified, it is not possible
to use the `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth),
diff --git a/iredis/data/commands/copy.md b/iredis/data/commands/copy.md
new file mode 100644
index 0000000..f9ff5b9
--- /dev/null
+++ b/iredis/data/commands/copy.md
@@ -0,0 +1,24 @@
+This command copies the value stored at the `source` key to the `destination`
+key.
+
+By default, the `destination` key is created in the logical database used by the
+connection. The `DB` option allows specifying an alternative logical database
+index for the destination key.
+
+The command returns an error when the `destination` key already exists. The
+`REPLACE` option removes the `destination` key before copying the value to it.
+
+@return
+
+@integer-reply, specifically:
+
+- `1` if `source` was copied.
+- `0` if `source` was not copied.
+
+@examples
+
+```
+SET dolly "sheep"
+COPY dolly clone
+GET clone
+```
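+
+For illustration only, the `DB` and `REPLACE` options described above could be
+combined like this (assuming `dolly` still exists from the example above; the
+destination database index 1 is arbitrary):
+
+```
+> COPY dolly clone DB 1 REPLACE
+(integer) 1
+```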
diff --git a/iredis/data/commands/eval.md b/iredis/data/commands/eval.md
index d1d0346..3618ac6 100644
--- a/iredis/data/commands/eval.md
+++ b/iredis/data/commands/eval.md
@@ -214,6 +214,27 @@ format specified above (as a Lua table with an `err` field). The script can pass
the exact error to the user by returning the error object returned by
`redis.pcall()`.
+## Running Lua under low memory conditions
+
+When the memory usage in Redis exceeds the `maxmemory` limit, the first write
+command encountered in the Lua script that uses additional memory will cause the
+script to abort (unless `redis.pcall` was used). However, one thing to caution
+here is that if the first write command does not use additional memory such as
+DEL, LREM, or SREM, etc, Redis will allow it to run and all subsequent commands
+in the Lua script will execute to completion for atomicity. If the subsequent
+writes in the script generate additional memory, the Redis memory usage can go
+over `maxmemory`.
+
+Another possible way for a Lua script to cause Redis memory usage to go above
+`maxmemory` happens when the script execution starts while Redis is slightly
+below `maxmemory`, so the first write command in the script is allowed. As the
+script executes, subsequent write commands continue to consume memory and cause
+the Redis server to go above `maxmemory`.
+
+In those scenarios, it is recommended to configure the `maxmemory-policy` not to
+use `noeviction`. Also Lua scripts should be short so that evictions of items
+can happen in between Lua scripts.
+
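+For illustration only, one way to follow this recommendation is to switch the
+eviction policy at runtime; `allkeys-lru` is just one possible choice:
+
+```
+> CONFIG SET maxmemory-policy allkeys-lru
+OK
+```
+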
## Bandwidth and EVALSHA
The `EVAL` command forces you to send the script body again and again. Redis
@@ -619,13 +640,13 @@ the cause of bugs.
## Using Lua scripting in RESP3 mode
-Starting with Redis version 6, the server supports two differnent protocols. One
+Starting with Redis version 6, the server supports two different protocols. One
is called RESP2, and is the old protocol: all the new connections to the server
start in this mode. However clients are able to negotiate the new protocol using
the `HELLO` command: this way the connection is put in RESP3 mode. In this mode
certain commands, like for instance `HGETALL`, reply with a new data type (the
Map data type in this specific case). The RESP3 protocol is semantically more
-powerful, however most scripts are ok with using just RESP2.
+powerful, however most scripts are OK with using just RESP2.
The Lua engine always assumes to run in RESP2 mode when talking with Redis, so
whatever the connection that is invoking the `EVAL` or `EVALSHA` command is in
@@ -669,7 +690,7 @@ At this point the new conversions are available, specifically:
- Lua table with a single `map` field set to a field-value Lua table -> Redis
map reply.
- Lua table with a single `set` field set to a field-value Lua table -> Redis
- set reply, the values are discared and can be anything.
+ set reply, the values are discarded and can be anything.
- Lua table with a single `double` field set to a field-value Lua table -> Redis
double reply.
- Lua null -> Redis RESP3 new null reply (protocol `"_\r\n"`).
diff --git a/iredis/data/commands/eval_ro.md b/iredis/data/commands/eval_ro.md
new file mode 100644
index 0000000..db8c707
--- /dev/null
+++ b/iredis/data/commands/eval_ro.md
@@ -0,0 +1,19 @@
+This is a read-only variant of the `EVAL` command that isn't allowed to execute
+commands that modify data.
+
+Unlike `EVAL`, scripts executed with this command can always be killed and never
+affect the replication stream. Because it can only read data, this command can
+always be executed on a master or a replica.
+
+@examples
+
+```
+> SET mykey "Hello"
+OK
+
+> EVAL_RO "return redis.call('GET', KEYS[1])" 1 mykey
+"Hello"
+
+> EVAL_RO "return redis.call('DEL', KEYS[1])" 1 mykey
+(error) ERR Error running script (call to f_359f69785f876b7f3f60597d81534f3d6c403284): @user_script:1: @user_script: 1: Write commands are not allowed from read-only scripts
+```
diff --git a/iredis/data/commands/evalsha_ro.md b/iredis/data/commands/evalsha_ro.md
new file mode 100644
index 0000000..04368f3
--- /dev/null
+++ b/iredis/data/commands/evalsha_ro.md
@@ -0,0 +1,6 @@
+This is a read-only variant of the `EVALSHA` command that isn't allowed to
+execute commands that modify data.
+
+Unlike `EVALSHA`, scripts executed with this command can always be killed and
+never affect the replication stream. Because it can only read data, this command
+can always be executed on a master or a replica.
diff --git a/iredis/data/commands/expiretime.md b/iredis/data/commands/expiretime.md
new file mode 100644
index 0000000..1c4c815
--- /dev/null
+++ b/iredis/data/commands/expiretime.md
@@ -0,0 +1,22 @@
+Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which
+the given key will expire.
+
+See also the `PEXPIRETIME` command which returns the same information with
+milliseconds resolution.
+
+@return
+
+@integer-reply: Expiration Unix timestamp in seconds, or a negative value in
+order to signal an error (see the description below).
+
+- The command returns `-1` if the key exists but has no associated expiration
+ time.
+- The command returns `-2` if the key does not exist.
+
+@examples
+
+```cli
+SET mykey "Hello"
+EXPIREAT mykey 33177117420
+EXPIRETIME mykey
+```
diff --git a/iredis/data/commands/failover.md b/iredis/data/commands/failover.md
new file mode 100644
index 0000000..28be506
--- /dev/null
+++ b/iredis/data/commands/failover.md
@@ -0,0 +1,84 @@
+This command will start a coordinated failover between the
+currently-connected-to master and one of its replicas. The failover is not
+synchronous; instead, a background task will handle coordinating the failover. It
+is designed to limit data loss and unavailability of the cluster during the
+failover. This command is analogous to the `CLUSTER FAILOVER` command for
+non-clustered Redis and is similar to the failover support provided by sentinel.
+
+The specific details of the default failover flow are as follows:
+
+1. The master will internally start a `CLIENT PAUSE WRITE`, which will pause
+ incoming writes and prevent the accumulation of new data in the replication
+ stream.
+2. The master will monitor its replicas, waiting for a replica to indicate that
+ it has fully consumed the replication stream. If the master has multiple
+ replicas, it will only wait for the first replica to catch up.
+3. The master will then demote itself to a replica. This is done to prevent any
+ dual master scenarios. NOTE: The master will not discard its data, so it will
+ be able to rollback if the replica rejects the failover request in the next
+ step.
+4. The previous master will send a special PSYNC request to the target replica,
+ `PSYNC FAILOVER`, instructing the target replica to become a master.
+5. Once the previous master receives acknowledgement the `PSYNC FAILOVER` was
+ accepted it will unpause its clients. If the PSYNC request is rejected, the
+ master will abort the failover and return to normal.
+
+The field `master_failover_state` in `INFO replication` can be used to track the
+current state of the failover, which has the following values:
+
+- `no-failover`: There is no ongoing coordinated failover.
+- `waiting-for-sync`: The master is waiting for the replica to catch up to its
+ replication offset.
+- `failover-in-progress`: The master has demoted itself, and is attempting to
+ hand off ownership to a target replica.
+
+If the previous master had additional replicas attached to it, they will
+continue replicating from it as chained replicas. You will need to manually
+execute a `REPLICAOF` on these replicas to start replicating directly from the
+new master.
+
+## Optional arguments
+
+The following optional arguments exist to modify the behavior of the failover
+flow:
+
+- `TIMEOUT` _milliseconds_ -- This option allows specifying a maximum time a
+ master will wait in the `waiting-for-sync` state before aborting the failover
+ attempt and rolling back. This is intended to set an upper bound on the write
+ outage the Redis cluster can experience. Failovers typically happen in less
+ than a second, but could take longer if there is a large amount of write
+ traffic or the replica is already behind in consuming the replication stream.
+ If this value is not specified, the timeout can be considered to be
+ "infinite".
+
+- `TO` _HOST_ _PORT_ -- This option allows designating a specific replica, by
+ its host and port, to failover to. The master will wait specifically for this
+ replica to catch up to its replication offset, and then failover to it.
+
+- `FORCE` -- If both the `TIMEOUT` and `TO` options are set, the force flag can
+ also be used to designate that once the timeout has elapsed, the master
+ should failover to the target replica instead of rolling back. This can be
+ used for a best-effort attempt at a failover without data loss, but limiting
+ write outage.
+
+NOTE: The master will always roll back if the `PSYNC FAILOVER` request is
+rejected by the target replica.
+
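+For illustration only, a coordinated failover to a specific replica with a
+one-minute upper bound could be requested as follows (the host and port are
+hypothetical):
+
+```
+> FAILOVER TO 192.168.1.28 6379 TIMEOUT 60000
+OK
+```
+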
+## Failover abort
+
+The failover command is intended to be safe from data loss and corruption, but
+can encounter some scenarios it cannot automatically recover from and may get
+stuck. For this purpose, the `FAILOVER ABORT` command exists, which will abort
+an ongoing failover and return the master to its normal state. The command has
+no side effects if issued in the `waiting-for-sync` state but can introduce
+multi-master scenarios in the `failover-in-progress` state. If a multi-master
+scenario is encountered, you will need to manually identify which master has the
+latest data and designate it as the master and have the other replicas
+replicate from it.
+
+NOTE: `REPLICAOF` is disabled while a failover is in progress; this is to
+prevent unintended interactions with the failover that might cause data loss.
+
+@return
+
+@simple-string-reply: `OK` if the command was accepted and a coordinated
+failover is in progress. An error if the operation cannot be executed.
diff --git a/iredis/data/commands/flushall.md b/iredis/data/commands/flushall.md
index 8cc7560..ff454c1 100644
--- a/iredis/data/commands/flushall.md
+++ b/iredis/data/commands/flushall.md
@@ -1,19 +1,26 @@
Delete all the keys of all the existing databases, not just the currently
selected one. This command never fails.
-The time-complexity for this operation is O(N), N being the number of keys in
-all existing databases.
+By default, `FLUSHALL` will synchronously flush all the databases. Starting with
+Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to
+"yes" changes the default flush mode to asynchronous.
-## `FLUSHALL ASYNC` (Redis 4.0.0 or greater)
+It is possible to use one of the following modifiers to dictate the flushing
+mode explicitly:
-Redis is now able to delete keys in the background in a different thread without
-blocking the server. An `ASYNC` option was added to `FLUSHALL` and `FLUSHDB` in
-order to let the entire dataset or a single database to be freed asynchronously.
+- `ASYNC`: flushes the databases asynchronously
+- `!SYNC`: flushes the databases synchronously
-Asynchronous `FLUSHALL` and `FLUSHDB` commands only delete keys that were
-present at the time the command was invoked. Keys created during an asynchronous
-flush will be unaffected.
+Note: an asynchronous `FLUSHALL` command only deletes keys that were present at
+the time the command was invoked. Keys created during an asynchronous flush will
+be unaffected.
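+
+For illustration only, requesting the asynchronous mode explicitly (the
+synchronous form would use the `SYNC` modifier instead):
+
+```
+> FLUSHALL ASYNC
+OK
+```
+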
@return
@simple-string-reply
+
+@history
+
+- `>= 4.0.0`: Added the `ASYNC` flushing mode modifier.
+- `>= 6.2.0`: Added the `!SYNC` flushing mode modifier and the
+ **lazyfree-lazy-user-flush** configuration directive.
diff --git a/iredis/data/commands/flushdb.md b/iredis/data/commands/flushdb.md
index 8c76001..ee47cc1 100644
--- a/iredis/data/commands/flushdb.md
+++ b/iredis/data/commands/flushdb.md
@@ -1,11 +1,18 @@
Delete all the keys of the currently selected DB. This command never fails.
-The time-complexity for this operation is O(N), N being the number of keys in
-the database.
+By default, `FLUSHDB` will synchronously flush all keys from the database.
+Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration
+directive to "yes" changes the default flush mode to asynchronous.
-## `FLUSHDB ASYNC` (Redis 4.0.0 or greater)
+It is possible to use one of the following modifiers to dictate the flushing
+mode explicitly:
-See `FLUSHALL` for documentation.
+- `ASYNC`: flushes the database asynchronously
+- `!SYNC`: flushes the database synchronously
+
+Note: an asynchronous `FLUSHDB` command only deletes keys that were present at
+the time the command was invoked. Keys created during an asynchronous flush will
+be unaffected.
@return
diff --git a/iredis/data/commands/geoadd.md b/iredis/data/commands/geoadd.md
index 32751db..65365bb 100644
--- a/iredis/data/commands/geoadd.md
+++ b/iredis/data/commands/geoadd.md
@@ -1,12 +1,13 @@
-Adds the specified geospatial items (latitude, longitude, name) to the specified
+Adds the specified geospatial items (longitude, latitude, name) to the specified
key. Data is stored into the key as a sorted set, in a way that makes it
-possible to later retrieve items using a query by radius with the `GEORADIUS` or
-`GEORADIUSBYMEMBER` commands.
+possible to query the items with the `GEOSEARCH` command.
The command takes arguments in the standard format x,y so the longitude must be
specified before the latitude. There are limits to the coordinates that can be
-indexed: areas very near to the poles are not indexable. The exact limits, as
-specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following:
+indexed: areas very near to the poles are not indexable.
+
+The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the
+following:
- Valid longitudes are from -180 to 180 degrees.
- Valid latitudes are from -85.05112878 to 85.05112878 degrees.
@@ -14,37 +15,58 @@ specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following:
The command will report an error when the user attempts to index coordinates
outside the specified ranges.
-**Note:** there is no **GEODEL** command because you can use `ZREM` in order to
-remove elements. The Geo index structure is just a sorted set.
+**Note:** there is no **GEODEL** command because you can use `ZREM` to remove
+elements. The Geo index structure is just a sorted set.
+
+## GEOADD options
+
+`GEOADD` also provides the following options:
+
+- **XX**: Only update elements that already exist. Never add elements.
+- **NX**: Don't update already existing elements. Always add new elements.
+- **CH**: Modify the return value from the number of new elements added, to the
+ total number of elements changed (CH is an abbreviation of _changed_). Changed
+ elements are **new elements added** and elements already existing for which
+ **the coordinates were updated**. So elements specified in the command line
+ having the same score as they had in the past are not counted. Note: normally,
+ the return value of `GEOADD` only counts the number of new elements added.
+
+Note: The **XX** and **NX** options are mutually exclusive.
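+
+For illustration only, a sketch of the `XX` and `CH` options (assuming the
+`Sicily` key starts empty; the coordinates are arbitrary):
+
+```
+> GEOADD Sicily 13.361389 38.115556 "Palermo"
+(integer) 1
+> GEOADD Sicily XX CH 13.371389 38.115556 "Palermo"
+(integer) 1
+```
+
+The second call updates an existing member only (`XX`) and, because of `CH`,
+reports it as changed.
+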
## How does it work?
The way the sorted set is populated is using a technique called
[Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude bits
-are interleaved in order to form an unique 52 bit integer. We know that a sorted
-set double score can represent a 52 bit integer without losing precision.
+are interleaved to form a unique 52-bit integer. We know that a sorted set
+double score can represent a 52-bit integer without losing precision.
-This format allows for radius querying by checking the 1+8 areas needed to cover
-the whole radius, and discarding elements outside the radius. The areas are
-checked by calculating the range of the box covered removing enough bits from
-the less significant part of the sorted set score, and computing the score range
-to query in the sorted set for each area.
+This format allows for bounding box and radius querying by checking the 1+8
+areas needed to cover the whole shape and discarding elements outside it. The
+areas are checked by calculating the range of the box covered, removing enough
+bits from the less significant part of the sorted set score, and computing the
+score range to query in the sorted set for each area.
## What Earth model does it use?
-It just assumes that the Earth is a sphere, since the used distance formula is
-the Haversine formula. This formula is only an approximation when applied to the
+The model assumes that the Earth is a sphere since it uses the Haversine formula
+to calculate distance. This formula is only an approximation when applied to the
Earth, which is not a perfect sphere. The introduced errors are not an issue
-when used in the context of social network sites that need to query by radius
-and most other applications. However in the worst case the error may be up to
-0.5%, so you may want to consider other systems for error-critical applications.
+when used, for example, by social networks and similar applications requiring
+this type of querying. However, in the worst case, the error may be up to 0.5%,
+so you may want to consider other systems for error-critical applications.
@return
@integer-reply, specifically:
-- The number of elements added to the sorted set, not including elements already
- existing for which the score was updated.
+- When used without optional arguments, the number of elements added to the
+ sorted set (excluding score updates).
+- If the `CH` option is specified, the number of elements that were changed
+ (added or updated).
+
+@history
+
+- `>= 6.2`: Added the `CH`, `NX` and `XX` options.
@examples
diff --git a/iredis/data/commands/georadius.md b/iredis/data/commands/georadius.md
index a1dd20d..fb6db1b 100644
--- a/iredis/data/commands/georadius.md
+++ b/iredis/data/commands/georadius.md
@@ -2,6 +2,9 @@ Return the members of a sorted set populated with geospatial information using
`GEOADD`, which are within the borders of the area specified with the center
location and the maximum distance from the center (the radius).
+As of Redis 6.2.0, the GEORADIUS command family is considered deprecated. Please
+prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code.
+
This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`
variants (see the section below for more information).
@@ -38,11 +41,13 @@ can be invoked using the following two options:
By default all the matching items are returned. It is possible to limit the
results to the first N matching items by using the **COUNT `<count>`** option.
-However note that internally the command needs to perform an effort proportional
-to the number of items matching the specified area, so to query very large areas
-with a very small `COUNT` option may be slow even if just a few results are
-returned. On the other hand `COUNT` can be a very effective way to reduce
-bandwidth usage if normally just the first results are used.
+When `ANY` is provided the command will return as soon as enough matches are
+found, so the results may not be the ones closest to the specified point, but on
+the other hand, the effort invested by the server is significantly lower. When
+`ANY` is not provided, the command will perform an effort that is proportional
+to the number of items matching the specified area and sort them, so to query
+very large areas with a very small `COUNT` option may be slow even if just a few
+results are returned.
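+
+For illustration only, asking for up to three matches without the proximity sort
+(assuming the `Sicily` key from the examples below is populated; with `ANY` the
+members may be returned in any order):
+
+```
+> GEORADIUS Sicily 15 37 200 km COUNT 3 ANY
+1) "Palermo"
+2) "Catania"
+```
+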
By default the command returns the items to the client. It is possible to store
the results with one of these options:
@@ -93,6 +98,10 @@ They are exactly like the original commands but refuse the `STORE` and
Both commands were introduced in Redis 3.2.10 and Redis 4.0.0 respectively.
+@history
+
+- `>= 6.2`: Added the `ANY` option for `COUNT`.
+
@examples
```cli
diff --git a/iredis/data/commands/georadiusbymember.md b/iredis/data/commands/georadiusbymember.md
index 88c1be4..43d0039 100644
--- a/iredis/data/commands/georadiusbymember.md
+++ b/iredis/data/commands/georadiusbymember.md
@@ -3,6 +3,9 @@ of taking, as the center of the area to query, a longitude and latitude value,
it takes the name of a member already existing inside the geospatial index
represented by the sorted set.
+As of Redis 6.2.0, the GEORADIUS command family is considered deprecated. Please
+prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code.
+
The position of the specified member is used as the center of the query.
Please check the example below and the `GEORADIUS` documentation for more
diff --git a/iredis/data/commands/geosearch.md b/iredis/data/commands/geosearch.md
new file mode 100644
index 0000000..014f443
--- /dev/null
+++ b/iredis/data/commands/geosearch.md
@@ -0,0 +1,77 @@
+Return the members of a sorted set populated with geospatial information using
+`GEOADD`, which are within the borders of the area specified by a given shape.
+This command extends the `GEORADIUS` command, so in addition to searching within
+circular areas, it supports searching within rectangular areas.
+
+This command should be used in place of the deprecated `GEORADIUS` and
+`GEORADIUSBYMEMBER` commands.
+
+The query's center point is provided by one of these mandatory options:
+
+- `FROMMEMBER`: Use the position of the given existing `<member>` in the sorted
+ set.
+- `FROMLONLAT`: Use the given `<longitude>` and `<latitude>` position.
+
+The query's shape is provided by one of these mandatory options:
+
+- `BYRADIUS`: Similar to `GEORADIUS`, search inside circular area according to
+ given `<radius>`.
+- `BYBOX`: Search inside an axis-aligned rectangle, determined by `<height>` and
+ `<width>`.
+
+The command optionally returns additional information using the following
+options:
+
+- `WITHDIST`: Also return the distance of the returned items from the specified
+ center point. The distance is returned in the same unit as specified for the
+ radius or height and width arguments.
+- `WITHCOORD`: Also return the longitude and latitude of the matching items.
+- `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item,
+ in the form of a 52 bit unsigned integer. This is only useful for low level
+ hacks or debugging and is otherwise of little interest for the general user.
+
+Matching items are returned unsorted by default. To sort them, use one of the
+following two options:
+
+- `ASC`: Sort returned items from the nearest to the farthest, relative to the
+ center point.
+- `DESC`: Sort returned items from the farthest to the nearest, relative to the
+ center point.
+
+All matching items are returned by default. To limit the results to the first N
+matching items, use the **COUNT `<count>`** option. When the `ANY` option is
+used, the command returns as soon as enough matches are found. This means that
+the results returned may not be the ones closest to the specified point, but the
+effort invested by the server to generate them is significantly less. When `ANY`
+is not provided, the command will perform an effort that is proportional to the
+number of items matching the specified area and sort them, so to query very
+large areas with a very small `COUNT` option may be slow even if just a few
+results are returned.
+
+@return
+
+@array-reply, specifically:
+
+- Without any `WITH` option specified, the command just returns a linear array
+ like ["New York","Milan","Paris"].
+- If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command
+ returns an array of arrays, where each sub-array represents a single item.
+
+When additional information is returned as an array of arrays for each item, the
+first item in the sub-array is always the name of the returned item. The other
+information is returned in the following order as successive elements of the
+sub-array.
+
+1. The distance from the center as a floating point number, in the same unit
+ specified in the shape.
+2. The geohash integer.
+3. The coordinates as a two items x,y array (longitude,latitude).
+
+@examples
+
+```cli
+GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2"
+GEOSEARCH Sicily FROMLONLAT 15 37 BYRADIUS 200 km ASC
+GEOSEARCH Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC
+```
diff --git a/iredis/data/commands/geosearchstore.md b/iredis/data/commands/geosearchstore.md
new file mode 100644
index 0000000..6a34bfa
--- /dev/null
+++ b/iredis/data/commands/geosearchstore.md
@@ -0,0 +1,11 @@
+This command is like `GEOSEARCH`, but stores the result in the destination key.
+
+This command comes in place of the now deprecated `GEORADIUS` and
+`GEORADIUSBYMEMBER`.
+
+By default, it stores the results in the `destination` sorted set with their
+geospatial information.
+
+When using the `STOREDIST` option, the command stores the items in a sorted set
+populated with their distance from the center of the circle or box, as a
+floating-point number, in the same unit specified for that shape.
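+
+For illustration only, a sketch that stores the matches closest to a point into
+a new key, scored by distance (assuming `Sicily` is populated as in the
+`GEOSEARCH` examples; the destination key name is arbitrary):
+
+```
+> GEOSEARCHSTORE key1 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 STOREDIST
+(integer) 3
+```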
diff --git a/iredis/data/commands/getdel.md b/iredis/data/commands/getdel.md
new file mode 100644
index 0000000..fbd9f72
--- /dev/null
+++ b/iredis/data/commands/getdel.md
@@ -0,0 +1,16 @@
+Get the value of `key` and delete the key. This command is similar to `GET`,
+except for the fact that it also deletes the key on success (if and only if the
+key's value type is a string).
+
+@return
+
+@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an
+error if the key's value type isn't a string.
+
+@examples
+
+```cli
+SET mykey "Hello"
+GETDEL mykey
+GET mykey
+```
diff --git a/iredis/data/commands/getex.md b/iredis/data/commands/getex.md
new file mode 100644
index 0000000..27dfc3d
--- /dev/null
+++ b/iredis/data/commands/getex.md
@@ -0,0 +1,28 @@
+Get the value of `key` and optionally set its expiration. `GETEX` is similar to
+`GET`, but is a write command with additional options.
+
+## Options
+
+The `GETEX` command supports a set of options that modify its behavior:
+
+- `EX` _seconds_ -- Set the specified expire time, in seconds.
+- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds.
+- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key
+ will expire, in seconds.
+- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the
+ key will expire, in milliseconds.
+- `PERSIST` -- Remove the time to live associated with the key.
+
+@return
+
+@bulk-string-reply: the value of `key`, or `nil` when `key` does not exist.
+
+@examples
+
+```cli
+SET mykey "Hello"
+GETEX mykey
+TTL mykey
+GETEX mykey EX 60
+TTL mykey
+```
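+
+The `PERSIST` option works the other way around, removing any TTL currently set
+on the key; continuing the example above:
+
+```cli
+GETEX mykey PERSIST
+TTL mykey
+```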
diff --git a/iredis/data/commands/getset.md b/iredis/data/commands/getset.md
index 20b86a2..64b1cba 100644
--- a/iredis/data/commands/getset.md
+++ b/iredis/data/commands/getset.md
@@ -1,5 +1,7 @@
Atomically sets `key` to `value` and returns the old value stored at `key`.
-Returns an error when `key` exists but does not hold a string value.
+Returns an error when `key` exists but does not hold a string value. Any
+previous time to live associated with the key is discarded on a successful
+`SET` operation.
## Design pattern
@@ -14,6 +16,9 @@ GETSET mycounter "0"
GET mycounter
```
+As of Redis 6.2, GETSET is considered deprecated. Please prefer `SET` with the
+`GET` parameter in new code.
+
@return
@bulk-string-reply: the old value stored at `key`, or `nil` when `key` did not
diff --git a/iredis/data/commands/hello.md b/iredis/data/commands/hello.md
index b7a9dc4..c229cc1 100644
--- a/iredis/data/commands/hello.md
+++ b/iredis/data/commands/hello.md
@@ -1,16 +1,41 @@
-Switch the connection to a different protocol. Redis version 6 or greater are
-able to support two protocols, the old protocol, RESP2, and a new one introduced
-with Redis 6, RESP3. RESP3 has certain advantages since when the connection is
-in this mode, Redis is able to reply with more semantical replies: for instance
-`HGETALL` will return a _map type_, so a client library implementation no longer
-requires to know in advance to translate the array into a hash before returning
-it to the caller. For a full coverage of RESP3 please
+Switch to a different protocol, optionally authenticating and setting the
+connection's name, or provide a contextual client report.
+
+Redis version 6 and above supports two protocols: the old protocol, RESP2, and a
+new one introduced with Redis 6, RESP3. RESP3 has certain advantages since when
+the connection is in this mode, Redis is able to reply with more semantical
+replies: for instance, `HGETALL` will return a _map type_, so a client library
+implementation no longer requires to know in advance to translate the array into
+a hash before returning it to the caller. For a full coverage of RESP3, please
[check this repository](https://github.com/antirez/resp3).
-Redis 6 connections starts in RESP2 mode, so clients implementing RESP2 do not
-need to change (nor there are short term plans to drop support for RESP2).
-Clients that want to handshake the RESP3 mode need to call the `HELLO` command,
-using "3" as first argument.
+In Redis 6 connections start in RESP2 mode, so clients implementing RESP2 do not
+need to be updated or changed. There are no short-term plans to drop support for
+RESP2, although future versions may default to RESP3.
+
+`HELLO` always replies with a list of current server and connection properties,
+such as: versions, modules loaded, client ID, replication role and so forth.
+When called without any arguments in Redis 6.2 and its default use of RESP2
+protocol, the reply looks like this:
+
+ > HELLO
+ 1) "server"
+ 2) "redis"
+ 3) "version"
+ 4) "255.255.255"
+ 5) "proto"
+ 6) (integer) 2
+ 7) "id"
+ 8) (integer) 5
+ 9) "mode"
+ 10) "standalone"
+ 11) "role"
+ 12) "master"
+ 13) "modules"
+ 14) (empty array)
+
+Clients that want to handshake using the RESP3 mode need to call the `HELLO`
+command and specify the value "3" as the `protover` argument, like so:
> HELLO 3
1# "server" => "redis"
@@ -21,26 +46,28 @@ using "3" as first argument.
6# "role" => "master"
7# "modules" => (empty array)
-The `HELLO` command has a useful reply that will state a number of facts about
-the server: the exact version, the set of modules loaded, the client ID, the
-replication role and so forth. Because of that, and given that the `HELLO`
-command also works with "2" as argument, both in order to downgrade the protocol
-back to version 2, or just to get the reply from the server without switching
-the protocol, client library authors may consider using this command instead of
-the canonical `PING` when setting up the connection.
-
-This command accepts two non mandatory options:
-
-- `AUTH <username> <password>`: directly authenticate the connection other than
- switching to the specified protocol. In this way there is no need to call
- `AUTH` before `HELLO` when setting up new connections. Note that the username
- can be set to "default" in order to authenticate against a server that does
- not use ACLs, but the simpler `requirepass` mechanism of Redis before
+Because `HELLO` replies with useful information, and given that `protover` is
+optional or can be set to "2", client library authors may consider using this
+command instead of the canonical `PING` when setting up the connection.
+
+When called with the optional `protover` argument, this command switches the
+protocol to the specified version and also accepts the following options:
+
+- `AUTH <username> <password>`: directly authenticate the connection in addition
+ to switching to the specified protocol version. This makes calling `AUTH`
+ before `HELLO` unnecessary when setting up a new connection. Note that the
+ `username` can be set to "default" to authenticate against a server that does
+ not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to
version 6.
-- `SETNAME <clientname>`: this is equivalent to also call `CLIENT SETNAME`.
+- `SETNAME <clientname>`: this is the equivalent of calling `CLIENT SETNAME`.
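+
+For example, a client can switch to RESP3, authenticate and name its connection
+in a single round trip (the credentials and client name below are placeholders);
+the reply is the same map of properties shown above:
+
+    > HELLO 3 AUTH default mypassword SETNAME myclient
+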
@return
@array-reply: a list of server properties. The reply is a map instead of an
-array when RESP3 is selected. The command returns an error if the protocol
+array when RESP3 is selected. The command returns an error if the `protover`
requested does not exist.
+
+@history
+
+- `>= 6.2`: `protover` made optional; when called without arguments the command
+ reports the current connection's context.
diff --git a/iredis/data/commands/hmset.md b/iredis/data/commands/hmset.md
index f66a364..0b2876a 100644
--- a/iredis/data/commands/hmset.md
+++ b/iredis/data/commands/hmset.md
@@ -2,7 +2,7 @@ Sets the specified fields to their respective values in the hash stored at
`key`. This command overwrites any specified fields already existing in the
hash. If `key` does not exist, a new key holding a hash is created.
-As per Redis 4.0.0, HMSET is considered deprecated. Please use `HSET` in new
+As of Redis 4.0.0, HMSET is considered deprecated. Please prefer `HSET` in new
code.
@return
diff --git a/iredis/data/commands/hrandfield.md b/iredis/data/commands/hrandfield.md
new file mode 100644
index 0000000..c6c82c5
--- /dev/null
+++ b/iredis/data/commands/hrandfield.md
@@ -0,0 +1,50 @@
+When called with just the `key` argument, return a random field from the hash
+value stored at `key`.
+
+If the provided `count` argument is positive, return an array of **distinct
+fields**. The array's length is either `count` or the hash's number of fields
+(`HLEN`), whichever is lower.
+
+If called with a negative `count`, the behavior changes and the command is
+allowed to return the **same field multiple times**. In this case, the number of
+returned fields is the absolute value of the specified `count`.
+
+The optional `WITHVALUES` modifier changes the reply so it includes the
+respective values of the randomly selected hash fields.
+
+@return
+
+@bulk-string-reply: without the additional `count` argument, the command returns
+a Bulk Reply with the randomly selected field, or `nil` when `key` does not
+exist.
+
+@array-reply: when the additional `count` argument is passed, the command
+returns an array of fields, or an empty array when `key` does not exist. If the
+`WITHVALUES` modifier is used, the reply is a list of fields and their values
+from the hash.
+the hash.
+
+@examples
+
+```cli
+HMSET coin heads obverse tails reverse edge null
+HRANDFIELD coin
+HRANDFIELD coin
+HRANDFIELD coin -5 WITHVALUES
+```
+
+## Specification of the behavior when count is passed
+
+When the `count` argument is a positive value this command behaves as follows:
+
+- No repeated fields are returned.
+- If `count` is bigger than the number of fields in the hash, the command will
+ only return the whole hash without additional fields.
+- The order of fields in the reply is not truly random, so it is up to the
+ client to shuffle them if needed.
+
+When the `count` is a negative value, the behavior changes as follows:
+
+- Repeating fields are possible.
+- Exactly `count` fields, or an empty array if the hash is empty (non-existing
+ key), are always returned.
+- The order of fields in the reply is truly random.
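+
+For instance, asking for more distinct fields than the hash holds simply returns
+the whole hash (continuing the `coin` example above):
+
+```cli
+HRANDFIELD coin 5 WITHVALUES
+```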
diff --git a/iredis/data/commands/incr.md b/iredis/data/commands/incr.md
index 8d8b012..110510e 100644
--- a/iredis/data/commands/incr.md
+++ b/iredis/data/commands/incr.md
@@ -65,14 +65,14 @@ The more simple and direct implementation of this pattern is the following:
FUNCTION LIMIT_API_CALL(ip)
ts = CURRENT_UNIX_TIME()
keyname = ip+":"+ts
-current = GET(keyname)
-IF current != NULL AND current > 10 THEN
+MULTI
+ INCR(keyname)
+ EXPIRE(keyname,10)
+EXEC
+current = RESPONSE_OF_INCR_WITHIN_MULTI
+IF current > 10 THEN
ERROR "too many requests per second"
ELSE
- MULTI
- INCR(keyname,1)
- EXPIRE(keyname,10)
- EXEC
PERFORM_API_CALL()
END
```
@@ -119,7 +119,7 @@ script that is send using the `EVAL` command (only available since Redis version
```
local current
current = redis.call("incr",KEYS[1])
-if tonumber(current) == 1 then
+if current == 1 then
redis.call("expire",KEYS[1],1)
end
```
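+
+A minimal sketch of sending such a script with `EVAL` (the key name is
+illustrative, and a `return current` is appended here so the caller can read the
+counter):
+
+```
+EVAL "local current = redis.call('incr',KEYS[1]) if current == 1 then redis.call('expire',KEYS[1],1) end return current" 1 rate.limit:127.0.0.1
+```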
diff --git a/iredis/data/commands/info.md b/iredis/data/commands/info.md
index 8dcebc1..3a24c42 100644
--- a/iredis/data/commands/info.md
+++ b/iredis/data/commands/info.md
@@ -15,6 +15,7 @@ The optional parameter can be used to select a specific section of information:
- `modules`: Modules section
- `keyspace`: Database related statistics
- `modules`: Module related sections
+- `errorstats`: Redis error statistics
It can also take the following values:
@@ -60,6 +61,7 @@ Here is the meaning of all fields in the **server** section:
- `run_id`: Random value identifying the Redis server (to be used by Sentinel
and Cluster)
- `tcp_port`: TCP/IP listen port
+- `server_time_in_usec`: Epoch-based system time with microsecond precision
- `uptime_in_seconds`: Number of seconds since Redis server start
- `uptime_in_days`: Same value expressed in days
- `hz`: The server's current frequency setting
@@ -72,14 +74,20 @@ Here is the meaning of all fields in the **clients** section:
- `connected_clients`: Number of client connections (excluding connections from
replicas)
+- `cluster_connections`: An approximation of the number of sockets used by the
+ cluster's bus
+- `maxclients`: The value of the `maxclients` configuration directive. This is
+ the upper limit for the sum of `connected_clients`, `connected_slaves` and
+ `cluster_connections`.
- `client_longest_output_list`: Longest output list among current client
connections
- `client_biggest_input_buf`: Biggest input buffer among current client
connections
- `blocked_clients`: Number of clients pending on a blocking call (`BLPOP`,
- `BRPOP`, `BRPOPLPUSH`, `BZPOPMIN`, `BZPOPMAX`)
+ `BRPOP`, `BRPOPLPUSH`, `BLMOVE`, `BZPOPMIN`, `BZPOPMAX`)
- `tracking_clients`: Number of clients being tracked (`CLIENT TRACKING`)
- `clients_in_timeout_table`: Number of clients in the clients timeout table
+- `io_threads_active`: Flag indicating if I/O threads are active
Here is the meaning of all fields in the **memory** section:
@@ -143,6 +151,15 @@ by referring to the `MEMORY STATS` command and the `MEMORY DOCTOR`.
Here is the meaning of all fields in the **persistence** section:
- `loading`: Flag indicating if the load of a dump file is on-going
+- `current_cow_size`: The size in bytes of copy-on-write memory while a child
+ fork is running
+- `current_fork_perc`: The percentage of progress of the current fork process.
+ For AOF and RDB forks it is the percentage of `current_save_keys_processed`
+ out of `current_save_keys_total`.
+- `current_save_keys_processed`: Number of keys processed by the current save
+ operation
+- `current_save_keys_total`: Number of keys at the beginning of the current save
+ operation
- `rdb_changes_since_last_save`: Number of changes since the last dump
- `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going
- `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save
@@ -150,8 +167,8 @@ Here is the meaning of all fields in the **persistence** section:
- `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in seconds
- `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if
any
-- `rdb_last_cow_size`: The size in bytes of copy-on-write allocations during the
- last RDB save operation
+- `rdb_last_cow_size`: The size in bytes of copy-on-write memory during the last
+ RDB save operation
- `aof_enabled`: Flag indicating AOF logging is activated
- `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is on-going
- `aof_rewrite_scheduled`: Flag indicating an AOF rewrite operation will be
@@ -162,11 +179,11 @@ Here is the meaning of all fields in the **persistence** section:
if any
- `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation
- `aof_last_write_status`: Status of the last write operation to the AOF
-- `aof_last_cow_size`: The size in bytes of copy-on-write allocations during the
- last AOF rewrite operation
+- `aof_last_cow_size`: The size in bytes of copy-on-write memory during the last
+ AOF rewrite operation
- `module_fork_in_progress`: Flag indicating a module fork is on-going
-- `module_fork_last_cow_size`: The size in bytes of copy-on-write allocations
- during the last module fork operation
+- `module_fork_last_cow_size`: The size in bytes of copy-on-write memory during
+ the last module fork operation
`rdb_changes_since_last_save` refers to the number of operations that produced
some kind of changes in the dataset since the last time either `SAVE` or
@@ -187,6 +204,8 @@ If a load operation is on-going, these additional fields will be added:
- `loading_start_time`: Epoch-based timestamp of the start of the load operation
- `loading_total_bytes`: Total file size
+- `loading_rdb_used_mem`: The memory usage of the server that had generated the
+ RDB file at the time of the file's creation
- `loading_loaded_bytes`: Number of bytes already loaded
- `loading_loaded_perc`: Same value expressed as a percentage
- `loading_eta_seconds`: ETA in seconds for the load to be complete
@@ -218,6 +237,7 @@ Here is the meaning of all fields in the **stats** section:
- `pubsub_channels`: Global number of pub/sub channels with client subscriptions
- `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions
- `latest_fork_usec`: Duration of the latest fork operation in microseconds
+- `total_forks`: Total number of fork operations since the server start
- `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes
- `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes
(applicable only to writable replicas)
@@ -235,12 +255,22 @@ Here is the meaning of all fields in the **stats** section:
(only applicable for broadcast mode)
- `unexpected_error_replies`: Number of unexpected error replies, that are types
of errors from an AOF load or replication
+- `total_error_replies`: Total number of issued error replies, that is the sum
+  of rejected commands (errors prior to command execution) and failed commands
+  (errors within the command execution)
+- `total_reads_processed`: Total number of read events processed
+- `total_writes_processed`: Total number of write events processed
+- `io_threaded_reads_processed`: Number of read events processed by the main and
+ I/O threads
+- `io_threaded_writes_processed`: Number of write events processed by the main
+ and I/O threads
Here is the meaning of all fields in the **replication** section:
- `role`: Value is "master" if the instance is replica of no one, or "slave" if
the instance is a replica of some master instance. Note that a replica can be
master of another replica (chained replication).
+- `master_failover_state`: The state of an ongoing failover, if any.
- `master_replid`: The replication ID of the Redis server.
- `master_replid2`: The secondary replication ID, used for PSYNC after a
failover.
@@ -267,7 +297,15 @@ If the instance is a replica, these additional fields are provided:
If a SYNC operation is on-going, these additional fields are provided:
-- `master_sync_left_bytes`: Number of bytes left before syncing is complete
+- `master_sync_total_bytes`: Total number of bytes that need to be transferred.
+  This may be 0 when the size is unknown (for example, when the
+  `repl-diskless-sync` configuration directive is used)
+- `master_sync_read_bytes`: Number of bytes already transferred
+- `master_sync_left_bytes`: Number of bytes left before syncing is complete (may
+ be negative when `master_sync_total_bytes` is 0)
+- `master_sync_perc`: The percentage of `master_sync_read_bytes` out of
+  `master_sync_total_bytes`, or an approximation that uses
+  `loading_rdb_used_mem` when `master_sync_total_bytes` is 0
- `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O
during a SYNC operation
@@ -291,18 +329,36 @@ For each replica, the following line is added:
Here is the meaning of all fields in the **cpu** section:
-- `used_cpu_sys`: System CPU consumed by the Redis server
-- `used_cpu_user`:User CPU consumed by the Redis server
+- `used_cpu_sys`: System CPU consumed by the Redis server, which is the sum of
+ system CPU consumed by all threads of the server process (main thread and
+ background threads)
+- `used_cpu_user`: User CPU consumed by the Redis server, which is the sum of
+ user CPU consumed by all threads of the server process (main thread and
+ background threads)
- `used_cpu_sys_children`: System CPU consumed by the background processes
- `used_cpu_user_children`: User CPU consumed by the background processes
+- `used_cpu_sys_main_thread`: System CPU consumed by the Redis server main
+ thread
+- `used_cpu_user_main_thread`: User CPU consumed by the Redis server main thread
The **commandstats** section provides statistics based on the command type,
-including the number of calls, the total CPU time consumed by these commands,
-and the average CPU consumed per command execution.
+including the number of calls that reached command execution (not rejected), the
+total CPU time consumed by these commands, the average CPU consumed per command
+execution, the number of rejected calls (errors prior to command execution), and
+the number of failed calls (errors within the command execution).
For each command type, the following line is added:
-- `cmdstat_XXX`: `calls=XXX,usec=XXX,usec_per_call=XXX`
+- `cmdstat_XXX`:
+ `calls=XXX,usec=XXX,usec_per_call=XXX,rejected_calls=XXX,failed_calls=XXX`
+
+The **errorstats** section enables keeping track of the different errors that
+occurred within Redis, based upon the reply error prefix (the first word after
+the "-", up to the first space, for example `ERR`).
+
+For each error type, the following line is added:
+
+- `errorstat_XXX`: `count=XXX`
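+
+For example, a fragment of the **errorstats** section might look like this (the
+counts are illustrative):
+
+```
+# Errorstats
+errorstat_ERR:count=3
+errorstat_WRONGTYPE:count=1
+```
+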
The **cluster** section currently only contains a unique field:
diff --git a/iredis/data/commands/latency-reset.md b/iredis/data/commands/latency-reset.md
index cec6f06..8d089e6 100644
--- a/iredis/data/commands/latency-reset.md
+++ b/iredis/data/commands/latency-reset.md
@@ -31,6 +31,6 @@ For more information refer to the [Latency Monitoring Framework page][lm].
[lm]: /topics/latency-monitor
-@reply
+@return
@integer-reply: the number of event time series that were reset.
diff --git a/iredis/data/commands/lmove.md b/iredis/data/commands/lmove.md
new file mode 100644
index 0000000..9100315
--- /dev/null
+++ b/iredis/data/commands/lmove.md
@@ -0,0 +1,77 @@
+Atomically returns and removes the first/last element (head/tail depending on
+the `wherefrom` argument) of the list stored at `source`, and pushes the element
+as the first/last element (head/tail depending on the `whereto` argument) of the
+list stored at `destination`.
+
+For example: consider `source` holding the list `a,b,c`, and `destination`
+holding the list `x,y,z`. Executing `LMOVE source destination RIGHT LEFT`
+results in `source` holding `a,b` and `destination` holding `c,x,y,z`.
+
+If `source` does not exist, the value `nil` is returned and no operation is
+performed. If `source` and `destination` are the same, the operation is
+equivalent to removing the first/last element from the list and pushing it as
+first/last element of the list, so it can be considered as a list rotation
+command (or a no-op if `wherefrom` is the same as `whereto`).
+
+This command comes in place of the now deprecated `RPOPLPUSH`. Doing
+`LMOVE source destination RIGHT LEFT` is equivalent.
+
+@return
+
+@bulk-string-reply: the element being popped and pushed.
+
+@examples
+
+```cli
+RPUSH mylist "one"
+RPUSH mylist "two"
+RPUSH mylist "three"
+LMOVE mylist myotherlist RIGHT LEFT
+LMOVE mylist myotherlist LEFT RIGHT
+LRANGE mylist 0 -1
+LRANGE myotherlist 0 -1
+```
+
+## Pattern: Reliable queue
+
+Redis is often used as a messaging server to implement processing of background
+jobs or other kinds of messaging tasks. A simple form of queue is often obtained
+by pushing values into a list on the producer side, and waiting for these values
+on the consumer side using `RPOP` (with polling), or `BRPOP` if the client is
+better served by a blocking operation.
+
+However in this context the obtained queue is not _reliable_ as messages can be
+lost, for example in the case of a network problem or if the consumer crashes
+just after the message is received but before it has been processed.
+
+`LMOVE` (or `BLMOVE` for the blocking variant) offers a way to avoid this
+problem: the consumer fetches the message and at the same time pushes it into a
+_processing_ list. It will use the `LREM` command in order to remove the message
+from the _processing_ list once the message has been processed.
+
+An additional client may monitor the _processing_ list for items that remain
+there for too long, pushing those timed-out items into the queue again if
+needed.
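+
+A minimal sketch of the consumer side of this pattern (key and payload names are
+illustrative); the `LREM` call acknowledges the message only once it has been
+processed:
+
+```
+> LMOVE myqueue processing LEFT RIGHT
+"job1"
+> LREM processing 1 "job1"
+(integer) 1
+```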
+
+## Pattern: Circular list
+
+Using `LMOVE` with the same source and destination key, a client can visit all
+the elements of an N-elements list, one after the other, in O(N) without
+transferring the full list from the server to the client using a single `LRANGE`
+operation.
+
+The above pattern works even under the following two conditions:
+
+- There are multiple clients rotating the list: they'll fetch different
+ elements, until all the elements of the list are visited, and the process
+ restarts.
+- Other clients are actively pushing new items at the end of the list.
+
+The above makes it very simple to implement a system where a set of items must
+be processed by N workers continuously as fast as possible. An example is a
+monitoring system that must check that a set of web sites are reachable, with
+the smallest delay possible, using a number of parallel workers.
+
+Note that this implementation of workers is trivially scalable and reliable,
+because even if a message is lost the item is still in the queue and will be
+processed at the next iteration.
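+
+As a small sketch of the rotation (key and values are illustrative):
+
+```
+> RPUSH rotation a b c
+(integer) 3
+> LMOVE rotation rotation RIGHT LEFT
+"c"
+> LRANGE rotation 0 -1
+1) "c"
+2) "a"
+3) "b"
+```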
diff --git a/iredis/data/commands/lpop.md b/iredis/data/commands/lpop.md
index 6049176..f870112 100644
--- a/iredis/data/commands/lpop.md
+++ b/iredis/data/commands/lpop.md
@@ -1,16 +1,29 @@
-Removes and returns the first element of the list stored at `key`.
+Removes and returns the first elements of the list stored at `key`.
+
+By default, the command pops a single element from the beginning of the list.
+When provided with the optional `count` argument, the reply will consist of up
+to `count` elements, depending on the list's length.
@return
+When called without the `count` argument:
+
@bulk-string-reply: the value of the first element, or `nil` when `key` does not
exist.
+When called with the `count` argument:
+
+@array-reply: list of popped elements, or `nil` when `key` does not exist.
+
+@history
+
+- `>= 6.2`: Added the `count` argument.
+
@examples
```cli
-RPUSH mylist "one"
-RPUSH mylist "two"
-RPUSH mylist "three"
+RPUSH mylist "one" "two" "three" "four" "five"
LPOP mylist
+LPOP mylist 2
LRANGE mylist 0 -1
```
diff --git a/iredis/data/commands/lpos.md b/iredis/data/commands/lpos.md
index c256e0e..77516e1 100644
--- a/iredis/data/commands/lpos.md
+++ b/iredis/data/commands/lpos.md
@@ -2,7 +2,7 @@ The command returns the index of matching elements inside a Redis list. By
default, when no options are given, it will scan the list from head to tail,
looking for the first match of "element". If the element is found, its index
(the zero-based position in the list) is returned. Otherwise, if no match is
-found, NULL is returned.
+found, `nil` is returned.
```
> RPUSH mylist a b c 1 2 3 c c
@@ -64,12 +64,12 @@ indexes. This is better than giving a very large `COUNT` option because it is
more general.
```
-> LPOS mylist COUNT 0
+> LPOS mylist c COUNT 0
[2,6,7]
```
When `COUNT` is used and no match is found, an empty array is returned. However
-when `COUNT` is not used and there are no matches, the command returns NULL.
+when `COUNT` is not used and there are no matches, the command returns `nil`.
Finally, the `MAXLEN` option tells the command to compare the provided element
only with a given maximum number of list items. So for instance specifying
@@ -80,9 +80,13 @@ useful to limit the maximum complexity of the command. It is also useful when we
expect the match to be found very early, but want to be sure that in case this
is not true, the command does not take too much time to run.
+When `MAXLEN` is used, it is possible to specify 0 as the maximum number of
+comparisons, as a way to tell the command we want unlimited comparisons. This is
+better than giving a very large `MAXLEN` option because it is more general.
+
@return
-The command returns the integer representing the matching element, or null if
+The command returns the integer representing the matching element, or `nil` if
there is no match. However, if the `COUNT` option is given the command returns
an array (empty if there are no matches).
diff --git a/iredis/data/commands/memory-usage.md b/iredis/data/commands/memory-usage.md
index 73e26d7..22e2740 100644
--- a/iredis/data/commands/memory-usage.md
+++ b/iredis/data/commands/memory-usage.md
@@ -37,4 +37,4 @@ OK
@return
-@integer-reply: the memory usage in bytes
+@integer-reply: the memory usage in bytes, or `nil` when the key does not exist.
diff --git a/iredis/data/commands/migrate.md b/iredis/data/commands/migrate.md
index cc4561c..096f277 100644
--- a/iredis/data/commands/migrate.md
+++ b/iredis/data/commands/migrate.md
@@ -68,9 +68,12 @@ a single key exists.
- `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or
greater ACL auth style).
-`COPY` and `REPLACE` are available only in 3.0 and above. `KEYS` is available
-starting with Redis 3.0.6. `AUTH` is available starting with Redis 4.0.7.
-`AUTH2` is available starting with Redis 6.0.0.
+@history
+
+- `>= 3.0.0`: Added the `COPY` and `REPLACE` options.
+- `>= 3.0.6`: Added the `KEYS` option.
+- `>= 4.0.7`: Added the `AUTH` option.
+- `>= 6.0.0`: Added the `AUTH2` option.
@return
diff --git a/iredis/data/commands/module-load.md b/iredis/data/commands/module-load.md
index c5919c0..99777c3 100644
--- a/iredis/data/commands/module-load.md
+++ b/iredis/data/commands/module-load.md
@@ -5,7 +5,7 @@ specified by the `path` argument. The `path` should be the absolute path of the
library, including the full filename. Any additional arguments are passed
unmodified to the module.
-**Note**: modules can also be loaded at server startup with 'loadmodule'
+**Note**: modules can also be loaded at server startup with `loadmodule`
configuration directive in `redis.conf`.
@return
diff --git a/iredis/data/commands/monitor.md b/iredis/data/commands/monitor.md
index 7900787..28cbeef 100644
--- a/iredis/data/commands/monitor.md
+++ b/iredis/data/commands/monitor.md
@@ -36,8 +36,8 @@ QUIT
Connection closed by foreign host.
```
-Manually issue the `QUIT` command to stop a `MONITOR` stream running via
-`telnet`.
+Manually issue the `QUIT` or `RESET` commands to stop a `MONITOR` stream running
+via `telnet`.
## Commands not logged by MONITOR
@@ -90,4 +90,5 @@ flow.
@history
-- `>=6.0`: `AUTH` excluded from the command's output.
+- `>= 6.2`: `RESET` can be called to exit monitor mode.
+- `>= 6.0`: `AUTH` excluded from the command's output.
diff --git a/iredis/data/commands/pexpiretime.md b/iredis/data/commands/pexpiretime.md
new file mode 100644
index 0000000..3455ef3
--- /dev/null
+++ b/iredis/data/commands/pexpiretime.md
@@ -0,0 +1,19 @@
+`PEXPIRETIME` has the same semantics as `EXPIRETIME`, but returns the absolute
+Unix expiration timestamp in milliseconds instead of seconds.
+
+@return
+
+@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value
+in order to signal an error (see the description below).
+
+- The command returns `-1` if the key exists but has no associated expiration
+ time.
+- The command returns `-2` if the key does not exist.
+
+@examples
+
+```cli
+SET mykey "Hello"
+PEXPIREAT mykey 33177117420000
+PEXPIRETIME mykey
+```
diff --git a/iredis/data/commands/publish.md b/iredis/data/commands/publish.md
index e4b338a..62283f8 100644
--- a/iredis/data/commands/publish.md
+++ b/iredis/data/commands/publish.md
@@ -1,5 +1,11 @@
Posts a message to the given channel.
+In a Redis Cluster clients can publish to every node. The cluster makes sure
+that published messages are forwarded as needed, so clients can subscribe to any
+channel by connecting to any one of the nodes.
+
@return
-@integer-reply: the number of clients that received the message.
+@integer-reply: the number of clients that received the message. Note that in a
+Redis Cluster, only clients that are connected to the same node as the
+publishing client are included in the count.
diff --git a/iredis/data/commands/pubsub.md b/iredis/data/commands/pubsub.md
index 0a8c0a2..f5a0a7c 100644
--- a/iredis/data/commands/pubsub.md
+++ b/iredis/data/commands/pubsub.md
@@ -4,6 +4,12 @@ separately. The general form is:
PUBSUB <subcommand> ... args ...
+Cluster note: in a Redis Cluster clients can subscribe to every node, and can
+also publish to every other node. The cluster will make sure that published
+messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster
+only report information from the node's Pub/Sub context, rather than the entire
+cluster.
+
# PUBSUB CHANNELS [pattern]
Lists the currently _active channels_. An active channel is a Pub/Sub channel
diff --git a/iredis/data/commands/reset.md b/iredis/data/commands/reset.md
new file mode 100644
index 0000000..d381198
--- /dev/null
+++ b/iredis/data/commands/reset.md
@@ -0,0 +1,23 @@
+This command performs a full reset of the connection's server-side context,
+mimicking the effect of disconnecting and reconnecting again.
+
+When the command is called from a regular client connection, it does the
+following:
+
+- Discards the current `MULTI` transaction block, if one exists.
+- Unwatches all keys `WATCH`ed by the connection.
+- Disables `CLIENT TRACKING`, if in use.
+- Sets the connection to `READWRITE` mode.
+- Cancels the connection's `ASKING` mode, if previously set.
+- Sets `CLIENT REPLY` to `ON`.
+- Sets the protocol version to RESP2.
+- `SELECT`s database 0.
+- Exits `MONITOR` mode, when applicable.
+- Aborts Pub/Sub's subscription state (`SUBSCRIBE` and `PSUBSCRIBE`), when
+ appropriate.
+- Deauthenticates the connection, requiring a call to `AUTH` to reauthenticate
+  when authentication is enabled.
+
+@return
+
+@simple-string-reply: always 'RESET'.
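+
+@examples
+
+A minimal illustration; whatever the connection's prior state, the server
+acknowledges with the `RESET` simple string:
+
+```
+> RESET
+RESET
+```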
diff --git a/iredis/data/commands/rpop.md b/iredis/data/commands/rpop.md
index 9c03902..6233c03 100644
--- a/iredis/data/commands/rpop.md
+++ b/iredis/data/commands/rpop.md
@@ -1,16 +1,29 @@
-Removes and returns the last element of the list stored at `key`.
+Removes and returns the last elements of the list stored at `key`.
+
+By default, the command pops a single element from the end of the list. When
+provided with the optional `count` argument, the reply will consist of up to
+`count` elements, depending on the list's length.
@return
+When called without the `count` argument:
+
@bulk-string-reply: the value of the last element, or `nil` when `key` does not
exist.
+When called with the `count` argument:
+
+@array-reply: list of popped elements, or `nil` when `key` does not exist.
+
+@history
+
+- `>= 6.2`: Added the `count` argument.
+
@examples
```cli
-RPUSH mylist "one"
-RPUSH mylist "two"
-RPUSH mylist "three"
+RPUSH mylist "one" "two" "three" "four" "five"
RPOP mylist
+RPOP mylist 2
LRANGE mylist 0 -1
```
diff --git a/iredis/data/commands/rpoplpush.md b/iredis/data/commands/rpoplpush.md
index 7a5b662..2333f84 100644
--- a/iredis/data/commands/rpoplpush.md
+++ b/iredis/data/commands/rpoplpush.md
@@ -11,6 +11,9 @@ performed. If `source` and `destination` are the same, the operation is
equivalent to removing the last element from the list and pushing it as first
element of the list, so it can be considered as a list rotation command.
+As of Redis 6.2.0, RPOPLPUSH is considered deprecated. Please prefer `LMOVE` in
+new code.
+
@return
@bulk-string-reply: the element being popped and pushed.
diff --git a/iredis/data/commands/sadd.md b/iredis/data/commands/sadd.md
index a8b280e..df12d6a 100644
--- a/iredis/data/commands/sadd.md
+++ b/iredis/data/commands/sadd.md
@@ -7,7 +7,7 @@ An error is returned when the value stored at `key` is not a set.
@return
@integer-reply: the number of elements that were added to the set, not including
-all the elements already present into the set.
+all the elements already present in the set.
@history
diff --git a/iredis/data/commands/scan.md b/iredis/data/commands/scan.md
index f1674fd..774425a 100644
--- a/iredis/data/commands/scan.md
+++ b/iredis/data/commands/scan.md
@@ -246,7 +246,7 @@ may receive no elements in many iterations.
It is possible for an infinite number of clients to iterate the same collection
at the same time, as the full state of the iterator is in the cursor, that is
-obtained and returned to the client at every call. Server side no state is taken
+obtained and returned to the client at every call. No server-side state is kept
at all.
## Terminating iterations in the middle
diff --git a/iredis/data/commands/script-debug.md b/iredis/data/commands/script-debug.md
index 67502b2..52f8194 100644
--- a/iredis/data/commands/script-debug.md
+++ b/iredis/data/commands/script-debug.md
@@ -17,8 +17,8 @@ is active and retains all changes to the data set once it ends.
- `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are
discarded).
-- `SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to
- data).
+- `!SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes
+ to data).
- `NO`. Disables scripts debug mode.
@return
diff --git a/iredis/data/commands/script-flush.md b/iredis/data/commands/script-flush.md
index 833732d..bc5a545 100644
--- a/iredis/data/commands/script-flush.md
+++ b/iredis/data/commands/script-flush.md
@@ -3,6 +3,21 @@ Flush the Lua scripts cache.
Please refer to the `EVAL` documentation for detailed information about Redis
Lua scripting.
+By default, `SCRIPT FLUSH` will synchronously flush the cache. Starting with
+Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to
+"yes" changes the default flush mode to asynchronous.
+
+It is possible to use one of the following modifiers to dictate the flushing
+mode explicitly:
+
+- `ASYNC`: flushes the cache asynchronously
+- `!SYNC`: flushes the cache synchronously
+
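+For example, to request an asynchronous flush explicitly (the reply is the usual
+`OK`):
+
+```
+> SCRIPT FLUSH ASYNC
+OK
+```
+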
@return
@simple-string-reply
+
+@history
+
+- `>= 6.2.0`: Added the `ASYNC` and `!SYNC` flushing mode modifiers, as well as
+ the **lazyfree-lazy-user-flush** configuration directive.
diff --git a/iredis/data/commands/set.md b/iredis/data/commands/set.md
index 2cf4afa..8f5c302 100644
--- a/iredis/data/commands/set.md
+++ b/iredis/data/commands/set.md
@@ -8,24 +8,41 @@ The `SET` command supports a set of options that modify its behavior:
- `EX` _seconds_ -- Set the specified expire time, in seconds.
- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds.
+- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key
+ will expire, in seconds.
+- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the
+ key will expire, in milliseconds.
- `NX` -- Only set the key if it does not already exist.
- `XX` -- Only set the key if it already exist.
- `KEEPTTL` -- Retain the time to live associated with the key.
+- `GET` -- Return the old string stored at key, or nil if key did not exist. An
+ error is returned and `SET` aborted if the value stored at key is not a
+ string.
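+
+For instance, the `GET` option returns the previous value while the new one is
+being set (key and values here are arbitrary):
+
+```
+> SET mykey "old value"
+OK
+> SET mykey "new value" GET
+"old value"
+```
+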
-Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, it
-is possible that in future versions of Redis these three commands will be
+Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`,
+`GETSET`, it is possible that in future versions of Redis these commands will be
deprecated and finally removed.
@return
-@simple-string-reply: `OK` if `SET` was executed correctly. @nil-reply: a Null
-Bulk Reply is returned if the `SET` operation was not performed because the user
+@simple-string-reply: `OK` if `SET` was executed correctly.
+
+@nil-reply: `(nil)` if the `SET` operation was not performed because the user
specified the `NX` or `XX` option but the condition was not met.
+If the command is issued with the `GET` option, the above does not apply. It
+will instead reply as follows, regardless of whether the `SET` was performed:
+
+@bulk-string-reply: the old string value stored at key.
+
+@nil-reply: `(nil)` if the key did not exist.
+
@history
- `>= 2.6.12`: Added the `EX`, `PX`, `NX` and `XX` options.
- `>= 6.0`: Added the `KEEPTTL` option.
+- `>= 6.2`: Added the `GET`, `EXAT` and `PXAT` options.
+- `>= 7.0`: Allowed the `NX` and `GET` options to be used together.
@examples
@@ -39,7 +56,7 @@ SET anotherkey "will expire in a minute" EX 60
## Patterns
**Note:** The following pattern is discouraged in favor of
-[the Redlock algorithm](http://redis.io/topics/distlock) which is only a bit
+[the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit
more complex to implement, but offers better guarantees and is fault tolerant.
The command `SET resource-name anystring NX EX max-lock-time` is a simple way to
diff --git a/iredis/data/commands/setbit.md b/iredis/data/commands/setbit.md
index a6b64f2..6f3a553 100644
--- a/iredis/data/commands/setbit.md
+++ b/iredis/data/commands/setbit.md
@@ -1,7 +1,8 @@
Sets or clears the bit at _offset_ in the string value stored at _key_.
-The bit is either set or cleared depending on _value_, which can be either 0
-or 1.
+The bit is either set or cleared depending on _value_, which can be either 0
+or 1.
When _key_ does not exist, a new string value is created. The string is grown to
make sure it can hold a bit at _offset_. The _offset_ argument is required to be
diff --git a/iredis/data/commands/setnx.md b/iredis/data/commands/setnx.md
index 889f10c..acd77df 100644
--- a/iredis/data/commands/setnx.md
+++ b/iredis/data/commands/setnx.md
@@ -22,7 +22,7 @@ GET mykey
**Please note that:**
1. The following pattern is discouraged in favor of
- [the Redlock algorithm](http://redis.io/topics/distlock) which is only a bit
+ [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit
more complex to implement, but offers better guarantees and is fault
tolerant.
2. We document the old pattern anyway because certain existing implementations
diff --git a/iredis/data/commands/slowlog.md b/iredis/data/commands/slowlog.md
index 267a6bb..258c122 100644
--- a/iredis/data/commands/slowlog.md
+++ b/iredis/data/commands/slowlog.md
@@ -29,6 +29,8 @@ _slowlog-log-slower-than_ config parameter to zero) with minor performance hit.
To read the slow log the **SLOWLOG GET** command is used, that returns every
entry in the slow log. It is possible to return only the N most recent entries
passing an additional argument to the command (for instance **SLOWLOG GET 10**).
+The default requested length is 10 (when the argument is omitted). It's possible
+to pass -1 to get the entire slowlog.
Note that you need a recent version of redis-cli in order to read the slow log
output, since it uses some features of the protocol that were not formerly
diff --git a/iredis/data/commands/smismember.md b/iredis/data/commands/smismember.md
new file mode 100644
index 0000000..6d66eeb
--- /dev/null
+++ b/iredis/data/commands/smismember.md
@@ -0,0 +1,17 @@
+Returns whether each `member` is a member of the set stored at `key`.
+
+For every `member`, `1` is returned if the value is a member of the set, or `0`
+if the element is not a member of the set or if `key` does not exist.
+
+@return
+
+@array-reply: list representing the membership of the given elements, in the
+same order as they are requested.
+
+@examples
+
+```cli
+SADD myset "one"
+SADD myset "one"
+SMISMEMBER myset "one" "notamember"
+```
diff --git a/iredis/data/commands/spop.md b/iredis/data/commands/spop.md
index 76b1c89..057a025 100644
--- a/iredis/data/commands/spop.md
+++ b/iredis/data/commands/spop.md
@@ -1,14 +1,26 @@
-Removes and returns one or more random elements from the set value store at
+Removes and returns one or more random members from the set value stored at
`key`.
This operation is similar to `SRANDMEMBER`, that returns one or more random
elements from a set but does not remove it.
-The `count` argument is available since version 3.2.
+By default, the command pops a single member from the set. When provided with
+the optional `count` argument, the reply will consist of up to `count` members,
+depending on the set's cardinality.
@return
-@bulk-string-reply: the removed element, or `nil` when `key` does not exist.
+When called without the `count` argument:
+
+@bulk-string-reply: the removed member, or `nil` when `key` does not exist.
+
+When called with the `count` argument:
+
+@array-reply: the removed members, or an empty array when `key` does not exist.
+
+@history
+
+- `>= 3.2`: Added the `count` argument.
@examples
@@ -24,18 +36,8 @@ SPOP myset 3
SMEMBERS myset
```
-## Specification of the behavior when count is passed
-
-If count is bigger than the number of elements inside the Set, the command will
-only return the whole set without additional elements.
-
## Distribution of returned elements
Note that this command is not suitable when you need a guaranteed uniform
distribution of the returned elements. For more information about the algorithms
-used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms.
-
-## Count argument extension
-
-Redis 3.2 introduced an optional `count` argument that can be passed to `SPOP`
-in order to retrieve multiple elements in a single call.
+used for `SPOP`, look up both the Knuth sampling and Floyd sampling algorithms.
diff --git a/iredis/data/commands/srandmember.md b/iredis/data/commands/srandmember.md
index 99f9b7e..640ccdc 100644
--- a/iredis/data/commands/srandmember.md
+++ b/iredis/data/commands/srandmember.md
@@ -1,22 +1,21 @@
When called with just the `key` argument, return a random element from the set
value stored at `key`.
-Starting from Redis version 2.6, when called with the additional `count`
-argument, return an array of `count` **distinct elements** if `count` is
-positive. If called with a negative `count` the behavior changes and the command
-is allowed to return the **same element multiple times**. In this case the
-number of returned elements is the absolute value of the specified `count`.
+If the provided `count` argument is positive, return an array of **distinct
+elements**. The array's length is either `count` or the set's cardinality
+(`SCARD`), whichever is lower.
-When called with just the key argument, the operation is similar to `SPOP`,
-however while `SPOP` also removes the randomly selected element from the set,
-`SRANDMEMBER` will just return a random element without altering the original
-set in any way.
+If called with a negative `count`, the behavior changes and the command is
+allowed to return the **same element multiple times**. In this case, the number
+of returned elements is the absolute value of the specified `count`.
@return
-@bulk-string-reply: without the additional `count` argument the command returns
+@bulk-string-reply: without the additional `count` argument, the command returns
a Bulk Reply with the randomly selected element, or `nil` when `key` does not
-exist. @array-reply: when the additional `count` argument is passed the command
+exist.
+
+@array-reply: when the additional `count` argument is passed, the command
returns an array of elements, or an empty array when `key` does not exist.
@examples
@@ -28,26 +27,32 @@ SRANDMEMBER myset 2
SRANDMEMBER myset -5
```
+@history
+
+- `>= 2.6.0`: Added the optional `count` argument.
+
## Specification of the behavior when count is passed
-When a count argument is passed and is positive, the elements are returned as if
-every selected element is removed from the set (like the extraction of numbers
-in the game of Bingo). However elements are **not removed** from the Set. So
-basically:
+When the `count` argument is a positive value this command behaves as follows:
- No repeated elements are returned.
-- If count is bigger than the number of elements inside the Set, the command
- will only return the whole set without additional elements.
+- If `count` is bigger than the set's cardinality, the command will only return
+ the whole set without additional elements.
+- The order of elements in the reply is not truly random, so it is up to the
+ client to shuffle them if needed.
-When instead the count is negative, the behavior changes and the extraction
-happens as if you put the extracted element inside the bag again after every
-extraction, so repeated elements are possible, and the number of elements
-requested is always returned as we can repeat the same elements again and again,
-with the exception of an empty Set (non existing key) that will always produce
-an empty array as a result.
+When the `count` is a negative value, the behavior changes as follows:
+
+- Repeating elements are possible.
+- Exactly `count` elements, or an empty array if the set is empty (non-existing
+ key), are always returned.
+- The order of elements in the reply is truly random.
## Distribution of returned elements
+Note: this section is relevant only for Redis 5 or below, as Redis 6 implements
+a fairer algorithm.
+
The distribution of the returned elements is far from perfect when the number of
elements in the set is small, this is due to the fact that we used an
approximated random element function that does not really guarantees good
diff --git a/iredis/data/commands/stralgo.md b/iredis/data/commands/stralgo.md
index ccd72fe..f3a31dd 100644
--- a/iredis/data/commands/stralgo.md
+++ b/iredis/data/commands/stralgo.md
@@ -11,7 +11,7 @@ argument must be "LCS", since this is the only implemented one.
## LCS algorithm
```
-STRALGO LCS [KEYS ...] [STRINGS ...] [LEN] [IDX] [MINMATCHLEN <len>] [WITHMATCHLEN]
+STRALGO LCS STRINGS <string_a> <string_b> | KEYS <key_a> <key_b> [LEN] [IDX] [MINMATCHLEN <len>] [WITHMATCHLEN]
```
The LCS subcommand implements the longest common subsequence algorithm. Note
@@ -113,9 +113,9 @@ For the LCS algorithm:
- Without modifiers the string representing the longest common substring is
returned.
-- When LEN is given the command returns the length of the longest common
+- When `LEN` is given the command returns the length of the longest common
substring.
-- When IDX is given the command returns an array with the LCS length and all the
- ranges in both the strings, start and end offset for each string, where there
- are matches. When WITHMATCHLEN is given each array representing a match will
- also have the length of the match (see examples).
+- When `IDX` is given the command returns an array with the LCS length and all
+ the ranges in both the strings, start and end offset for each string, where
+ there are matches. When `WITHMATCHLEN` is given each array representing a
+ match will also have the length of the match (see examples).
diff --git a/iredis/data/commands/subscribe.md b/iredis/data/commands/subscribe.md
index 997670c..2715c63 100644
--- a/iredis/data/commands/subscribe.md
+++ b/iredis/data/commands/subscribe.md
@@ -2,4 +2,8 @@ Subscribes the client to the specified channels.
Once the client enters the subscribed state it is not supposed to issue any
other commands, except for additional `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`,
-`PUNSUBSCRIBE`, `PING` and `QUIT` commands.
+`PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands.
+
+@history
+
+- `>= 6.2`: `RESET` can be called to exit subscribed state.
diff --git a/iredis/data/commands/xack.md b/iredis/data/commands/xack.md
index 76b7c13..eb7ab83 100644
--- a/iredis/data/commands/xack.md
+++ b/iredis/data/commands/xack.md
@@ -1,5 +1,5 @@
-The `XACK` command removes one or multiple messages from the _pending entries
-list_ (PEL) of a stream consumer group. A message is pending, and as such stored
+The `XACK` command removes one or multiple messages from the _Pending Entries
+List_ (PEL) of a stream consumer group. A message is pending, and as such stored
inside the PEL, when it was delivered to some consumer, normally as a side
effect of calling `XREADGROUP`, or when a consumer took ownership of a message
calling `XCLAIM`. The pending message was delivered to some consumer but the
@@ -17,9 +17,13 @@ entry about this message is also purged, releasing memory from the Redis server.
@integer-reply, specifically:
The command returns the number of messages successfully acknowledged. Certain
-message IDs may no longer be part of the PEL (for example because they have been
-already acknowledge), and XACK will not count them as successfully acknowledged.
+message IDs may no longer be part of the PEL (for example because they have
+already been acknowledged), and XACK will not count them as successfully
+acknowledged.
-```cli
-XACK mystream mygroup 1526569495631-0
+@examples
+
+```
+redis> XACK mystream mygroup 1526569495631-0
+(integer) 1
```
diff --git a/iredis/data/commands/xadd.md b/iredis/data/commands/xadd.md
index d60b571..aca1de7 100644
--- a/iredis/data/commands/xadd.md
+++ b/iredis/data/commands/xadd.md
@@ -1,6 +1,7 @@
Appends the specified stream entry to the stream at the specified key. If the
key does not exist, as a side effect of running this command the key is created
-with a stream value.
+with a stream value. The creation of the stream's key can be disabled with the
+`NOMKSTREAM` option.
An entry is composed of a set of field-value pairs, it is basically a small
dictionary. The field-value pairs are stored in the same order they are given by
@@ -14,11 +15,12 @@ stream.
## Specifying a Stream ID as an argument
-A stream entry ID identifies a given entry inside a stream. The `XADD` command
-will auto-generate a unique ID for you if the ID argument specified is the `*`
-character (asterisk ASCII character). However, while useful only in very rare
-cases, it is possible to specify a well-formed ID, so that the new entry will be
-added exactly with the specified ID.
+A stream entry ID identifies a given entry inside a stream.
+
+The `XADD` command will auto-generate a unique ID for you if the ID argument
+specified is the `*` character (asterisk ASCII character). However, while useful
+only in very rare cases, it is possible to specify a well-formed ID, so that the
+new entry will be added exactly with the specified ID.
IDs are specified by two numbers separated by a `-` character:
@@ -39,30 +41,27 @@ or if after a failover the new master has a different absolute time.
When a user specified an explicit ID to `XADD`, the minimum valid ID is `0-1`,
and the user _must_ specify an ID which is greater than any other ID currently
-inside the stream, otherwise the command will fail. Usually resorting to
-specific IDs is useful only if you have another system generating unique IDs
-(for instance an SQL table) and you really want the Redis stream IDs to match
-the one of this other system.
+inside the stream, otherwise the command will fail and return an error. Usually
+resorting to specific IDs is useful only if you have another system generating
+unique IDs (for instance an SQL table) and you really want the Redis stream IDs
+to match the one of this other system.
## Capped streams
-It is possible to limit the size of the stream to a maximum number of elements
-using the **MAXLEN** option.
+`XADD` incorporates the same semantics as the `XTRIM` command - refer to its
+documentation page for more information. This allows adding new entries and
+keeping the stream's size in check with a single call to `XADD`, effectively
+capping the stream with an arbitrary threshold. Although exact trimming is
+possible and is the default, due to the internal representation of streams it is
+more efficient to add an entry and trim the stream with `XADD` using **almost
+exact** trimming (the `~` argument).
-Trimming with **MAXLEN** can be expensive compared to just adding entries with
-`XADD`: streams are represented by macro nodes into a radix tree, in order to be
-very memory efficient. Altering the single macro node, consisting of a few tens
-of elements, is not optimal. So it is possible to give the command in the
-following special form:
+For example, calling `XADD` in the following form:
XADD mystream MAXLEN ~ 1000 * ... entry fields here ...
-The `~` argument between the **MAXLEN** option and the actual count means that
-the user is not really requesting that the stream length is exactly 1000 items,
-but instead it could be a few tens of entries more, but never less than 1000
-items. When this option modifier is used, the trimming is performed only when
-Redis is able to remove a whole macro node. This makes it much more efficient,
-and it is usually what you want.
+Will add a new entry but will also evict old entries so that the stream will
+contain only 1000 entries, or at most a few tens more.
## Additional information about streams
@@ -77,6 +76,14 @@ The command returns the ID of the added entry. The ID is the one auto-generated
if `*` is passed as ID argument, otherwise the command just returns the same ID
specified by the user during insertion.
+The command returns a @nil-reply when used with the `NOMKSTREAM` option and the
+key doesn't exist.
+
+@history
+
+- `>= 6.2`: Added the `NOMKSTREAM` option, `MINID` trimming strategy and the
+ `LIMIT` option.
+
@examples
```cli
diff --git a/iredis/data/commands/xautoclaim.md b/iredis/data/commands/xautoclaim.md
new file mode 100644
index 0000000..fe02fc5
--- /dev/null
+++ b/iredis/data/commands/xautoclaim.md
@@ -0,0 +1,70 @@
+This command transfers ownership of pending stream entries that match the
+specified criteria. Conceptually, `XAUTOCLAIM` is equivalent to calling
+`XPENDING` and then `XCLAIM`, but provides a more straightforward way to deal
+with message delivery failures via `SCAN`-like semantics.
+
+Like `XCLAIM`, the command operates on the stream entries at `<key>` and in the
+context of the provided `<group>`. It transfers ownership to `<consumer>` of
+messages pending for more than `<min-idle-time>` milliseconds and having an
+equal or greater ID than `<start>`.
+
+The optional `<count>` argument, which defaults to 100, is the upper limit of
+the number of entries that the command attempts to claim. Internally, the
+command begins scanning the consumer group's Pending Entries List (PEL) from
+`<start>` and filters out entries having an idle time less than or equal to
+`<min-idle-time>`. The maximum number of pending entries that the command scans
+is the product of multiplying `<count>`'s value by 10 (hard-coded). It is
+possible, therefore, that the number of entries claimed will be less than the
+specified value.
+
+The optional `JUSTID` argument changes the reply to return just an array of IDs
+of messages successfully claimed, without returning the actual message. Using
+this option means the retry counter is not incremented.
+
+The command returns the claimed entries as an array. It also returns a stream ID
+intended for cursor-like use as the `<start>` argument for its subsequent call.
+When there are no remaining PEL entries, the command returns the special `0-0`
+ID to signal completion. However, note that you may want to continue calling
+`XAUTOCLAIM` with `0-0` as the `<start>` ID even after the scan is complete,
+because enough time may have passed for older pending entries to become
+eligible for claiming.
+
+Note that only messages that are idle longer than `<min-idle-time>` are claimed,
+and claiming a message resets its idle time. This ensures that only a single
+consumer can successfully claim a given pending message at a specific instant of
+time and trivially reduces the probability of processing the same message
+multiple times.
+
+Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted
+deliveries count for that message, unless the `JUSTID` option has been specified
+(which only delivers the message ID, not the message itself). Messages that
+cannot be processed for some reason - for example, because consumers
+systematically crash when processing them - will exhibit high attempted delivery
+counts that can be detected by monitoring.
+
+@return
+
+@array-reply, specifically:
+
+An array with two elements:
+
+1. The first element is a stream ID to be used as the `<start>` argument for the
+ next call to `XAUTOCLAIM`
+2. The second element is an array containing all the successfully claimed
+ messages in the same format as `XRANGE`.
+
+@examples
+
+```
+> XAUTOCLAIM mystream mygroup Alice 3600000 0-0 COUNT 25
+1) "0-0"
+2) 1) 1) "1609338752495-0"
+ 2) 1) "field"
+ 2) "value"
+```
+
+In the above example, we attempt to claim up to 25 entries that are pending and
+idle (not having been acknowledged or claimed) for at least an hour, starting at
+the stream's beginning. The consumer "Alice" from the "mygroup" group acquires
+ownership of these messages. Note that the stream ID returned in the example is
+`0-0`, indicating that the entire stream was scanned.
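
A minimal sketch of the cursor-like usage described above, in Python with redis-py's generic `execute_command` (so it does not depend on a specific client helper version; assumes Redis 6.2 and an existing stream, group and consumer):

```python
import redis

r = redis.Redis()  # assumed local instance


def autoclaim_all(key, group, consumer, min_idle_ms=3600000, count=25):
    """Scan the whole PEL cursor-style and claim eligible entries (sketch)."""
    start = "0-0"
    claimed = []
    while True:
        # Redis 6.2 replies with [next-start-id, [claimed entries...]].
        next_start, entries = r.execute_command(
            "XAUTOCLAIM", key, group, consumer, min_idle_ms, start, "COUNT", count
        )
        claimed.extend(entries)
        if next_start == b"0-0":  # special ID: the scan reached the end of the PEL
            return claimed
        start = next_start


claimed = autoclaim_all("mystream", "mygroup", "Alice")
```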
diff --git a/iredis/data/commands/xclaim.md b/iredis/data/commands/xclaim.md
index e6ee8c9..7b611d2 100644
--- a/iredis/data/commands/xclaim.md
+++ b/iredis/data/commands/xclaim.md
@@ -5,14 +5,15 @@ command argument. Normally this is what happens:
1. There is a stream with an associated consumer group.
2. Some consumer A reads a message via `XREADGROUP` from a stream, in the
context of that consumer group.
-3. As a side effect a pending message entry is created in the pending entries
- list (PEL) of the consumer group: it means the message was delivered to a
+3. As a side effect a pending message entry is created in the Pending Entries
+ List (PEL) of the consumer group: it means the message was delivered to a
given consumer, but it was not yet acknowledged via `XACK`.
4. Then suddenly that consumer fails forever.
5. Other consumers may inspect the list of pending messages, that are stale for
quite some time, using the `XPENDING` command. In order to continue
processing such messages, they use `XCLAIM` to acquire the ownership of the
- message and continue.
+ message and continue. As of Redis 6.2, consumers can use the `XAUTOCLAIM`
+ command to automatically scan and claim stale pending messages.
This dynamic is clearly explained in the
[Stream intro documentation](/topics/streams-intro).
@@ -68,7 +69,7 @@ The command returns all the messages successfully claimed, in the same format as
`XRANGE`. However if the `JUSTID` option was specified, only the message IDs are
reported, without including the actual message.
-Example:
+@examples
```
> XCLAIM mystream mygroup Alice 3600000 1526569498055-0
diff --git a/iredis/data/commands/xgroup.md b/iredis/data/commands/xgroup.md
index b690d87..80948a6 100644
--- a/iredis/data/commands/xgroup.md
+++ b/iredis/data/commands/xgroup.md
@@ -11,7 +11,7 @@ To create a new consumer group, use the following form:
XGROUP CREATE mystream consumer-group-name $
The last argument is the ID of the last item in the stream to consider already
-delivered. In the above case we used the special ID '\$' (that means: the ID of
+delivered. In the above case we used the special ID '$' (that means: the ID of
the last item in the stream). In this case the consumers fetching data from that
consumer group will only see new elements arriving in the stream.
@@ -22,8 +22,9 @@ starting ID for the consumer group:
Of course it is also possible to use any other valid ID. If the specified
consumer group already exists, the command returns a `-BUSYGROUP` error.
-Otherwise the operation is performed and OK is returned. There are no hard
-limits to the number of consumer groups you can associate to a given stream.
+Otherwise, the operation is performed and a @simple-string-reply `OK` is
+returned. There are no hard limits to the number of consumer groups you can
+associate with a given stream.
If the specified stream doesn't exist when creating a group, an error will be
returned. You can use the optional `MKSTREAM` subcommand as the last argument
@@ -38,16 +39,26 @@ A consumer group can be destroyed completely by using the following form:
The consumer group will be destroyed even if there are active consumers and
pending messages, so make sure to call this command only when really needed.
+This form returns an @integer-reply with the number of destroyed consumer groups
+(0 or 1).
+
+Consumers in a consumer group are auto-created every time a new consumer name is
+mentioned by some command. They can also be explicitly created by using the
+following form:
+
+ XGROUP CREATECONSUMER mystream consumer-group-name myconsumer123
+
+This form returns an @integer-reply with the number of created consumers (0 or
+1).
To just remove a given consumer from a consumer group, the following form is
used:
XGROUP DELCONSUMER mystream consumer-group-name myconsumer123
-Consumers in a consumer group are auto-created every time a new consumer name is
-mentioned by some command. However sometimes it may be useful to remove old
-consumers since they are no longer used. This form returns the number of pending
-messages that the consumer had before it was deleted.
+Sometimes it may be useful to remove old consumers since they are no longer
+used. This form returns an @integer-reply with the number of pending messages
+that the consumer had before it was deleted.
 Finally it is possible to set the next message to deliver using the `SETID`
subcommand. Normally the next ID is set when the consumer is created, as the
@@ -58,7 +69,13 @@ messages in a stream, you may want to set its next ID to 0:
XGROUP SETID mystream consumer-group-name 0
+This form returns a @simple-string-reply `OK` or an error.
+
Finally to get some help if you don't remember the syntax, use the HELP
subcommand:
XGROUP HELP
+
+@history
+
+- `>= 6.2.0`: Supports the `CREATECONSUMER` subcommand.
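
For reference, a small Python sketch (redis-py, raw commands so the exact client helper version does not matter; stream, group and consumer names are made up) exercising the forms described above:

```python
import redis

r = redis.Redis()  # assumed local instance

# Create a group at the stream's tail; MKSTREAM creates the stream if it is missing.
r.execute_command("XGROUP", "CREATE", "mystream", "grp1", "$", "MKSTREAM")

# Explicitly create a consumer (Redis 6.2+): returns 1 if created, 0 if it existed.
created = r.execute_command("XGROUP", "CREATECONSUMER", "mystream", "grp1", "alice")

# Remove a consumer: returns the number of pending messages it still had.
pending = r.execute_command("XGROUP", "DELCONSUMER", "mystream", "grp1", "alice")

# Rewind the group so it re-delivers the whole stream from the beginning.
r.execute_command("XGROUP", "SETID", "mystream", "grp1", "0")
print(created, pending)
```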
diff --git a/iredis/data/commands/xinfo.md b/iredis/data/commands/xinfo.md
index b9c228d..d8c226b 100644
--- a/iredis/data/commands/xinfo.md
+++ b/iredis/data/commands/xinfo.md
@@ -39,7 +39,8 @@ is the stream content.
- `XINFO STREAM <key> FULL [COUNT <count>]`
In this form the command returns the entire state of the stream, including
-entries, groups, consumers and PELs. This form is available since Redis 6.0.
+entries, groups, consumers and Pending Entries Lists (PELs). This form is
+available since Redis 6.0.
```
> XADD mystream * foo bar
diff --git a/iredis/data/commands/xpending.md b/iredis/data/commands/xpending.md
index 53e3f33..7eb48e0 100644
--- a/iredis/data/commands/xpending.md
+++ b/iredis/data/commands/xpending.md
@@ -2,7 +2,7 @@ Fetching data from a stream via a consumer group, and not acknowledging such
data, has the effect of creating _pending entries_. This is well explained in
the `XREADGROUP` command, and even better in our
[introduction to Redis Streams](/topics/streams-intro). The `XACK` command will
-immediately remove the pending entry from the Pending Entry List (PEL) since
+immediately remove the pending entry from the Pending Entries List (PEL) since
once a message is successfully processed, there is no longer need for the
consumer group to track it and to remember the current owner of the message.
@@ -58,10 +58,13 @@ consumer group, which is one, followed by the smallest and greatest ID among the
pending messages, and then list every consumer in the consumer group with at
least one pending message, and the number of pending messages it has.
-This is a good overview, but sometimes we are interested in the details. In
-order to see all the pending messages with more associated information we need
-to also pass a range of IDs, in a similar way we do it with `XRANGE`, and a non
-optional _count_ argument, to limit the number of messages returned per call:
+## Extended form of XPENDING
+
+The summary provides a good overview, but sometimes we are interested in the
+details. In order to see all the pending messages with more associated
+information we need to also pass a range of IDs, in a similar way we do it with
+`XRANGE`, and a non optional _count_ argument, to limit the number of messages
+returned per call:
```
> XPENDING mystream group55 - + 10
@@ -71,7 +74,7 @@ optional _count_ argument, to limit the number of messages returned per call:
4) (integer) 1
```
-In the extended form we no longer see the summary information, instead there are
+In the extended form we no longer see the summary information, instead there is
detailed information for each message in the pending entries list. For each
message four attributes are returned:
@@ -87,8 +90,8 @@ when some other consumer _claims_ the message with `XCLAIM`, or when the message
is delivered again via `XREADGROUP`, when accessing the history of a consumer in
a consumer group (see the `XREADGROUP` page for more info).
-Finally it is possible to pass an additional argument to the command, in order
-to see the messages having a specific owner:
+It is possible to pass an additional argument to the command, in order to see
+the messages having a specific owner:
```
> XPENDING mystream group55 - + 10 consumer-123
@@ -101,6 +104,29 @@ even when there are many pending messages from many consumers: we have a pending
entries list data structure both globally, and for every consumer, so we can
very efficiently show just messages pending for a single consumer.
+## Idle time filter
+
+Since version 6.2 it is possible to filter entries by their idle-time, given in
+milliseconds (useful for `XCLAIM`ing entries that have not been processed for
+some time):
+
+```
+> XPENDING mystream group55 IDLE 9000 - + 10
+> XPENDING mystream group55 IDLE 9000 - + 10 consumer-123
+```
+
+The first case will return the first 10 (or fewer) PEL entries of the entire
+group that are idle for over 9 seconds, whereas in the second case only those of
+`consumer-123`.
+
+## Exclusive ranges and iterating the PEL
+
+The `XPENDING` command allows iterating over the pending entries just like
+`XRANGE` and `XREVRANGE` allow for the stream's entries. You can do this by
+prefixing the ID of the last-read pending entry with the `(` character that
+denotes an open (exclusive) range, and proving it to the subsequent call to the
+command.
+
@return
@array-reply, specifically:
@@ -108,3 +134,7 @@ very efficiently show just messages pending for a single consumer.
The command returns data in different format depending on the way it is called,
as previously explained in this page. However the reply is always an array of
items.
+
+@history
+
+- `>= 6.2.0`: Added the `IDLE` option and exclusive range intervals.
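
To tie the extended form, the `IDLE` filter and the exclusive-range iteration together, here is a hedged Python sketch (redis-py, raw command form; Redis 6.2 assumed, names illustrative):

```python
import redis

r = redis.Redis()  # assumed local instance


def iterate_pel(key, group, idle_ms=9000, page=10):
    """Page through the group's PEL using exclusive ranges (sketch)."""
    start = "-"
    while True:
        # Extended form: IDLE filter, a range of IDs, and a mandatory count.
        rows = r.execute_command(
            "XPENDING", key, group, "IDLE", idle_ms, start, "+", page
        )
        if not rows:
            break
        for entry_id, consumer, idle, deliveries in rows:
            print(entry_id, consumer, idle, deliveries)
        # Prefix the last-seen ID with '(' so the next page excludes it.
        start = b"(" + rows[-1][0]


iterate_pel("mystream", "group55")
```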
diff --git a/iredis/data/commands/xrange.md b/iredis/data/commands/xrange.md
index b9d5ab0..fc0337e 100644
--- a/iredis/data/commands/xrange.md
+++ b/iredis/data/commands/xrange.md
@@ -67,6 +67,13 @@ Used in this way `XRANGE` works as a range query command to obtain entries in a
specified time. This is very handy in order to access the history of past events
in a stream.
+## Exclusive ranges
+
+The range is closed (inclusive) by default, meaning that the reply can include
+entries with IDs matching the query's start and end intervals. It is possible to
+specify an open interval (exclusive) by prefixing the ID with the character `(`.
+This is useful for iterating the stream, as explained below.
+
## Returning a maximum number of entries
Using the **COUNT** option it is possible to reduce the number of entries
@@ -110,14 +117,14 @@ is trivial:
```
Then instead of starting the iteration again from `-`, as the start of the range
-we use the entry ID of the _last_ entry returned by the previous `XRANGE` call,
-adding the sequence part of the ID by one.
+we use the entry ID of the _last_ entry returned by the previous `XRANGE` call
+as an exclusive interval.
-The ID of the last entry is `1526985685298-0`, so we just add 1 to the sequence
-to obtain `1526985685298-1`, and continue our iteration:
+The ID of the last entry is `1526985685298-0`, so we just prefix it with a '(',
+and continue our iteration:
```
-> XRANGE writers 1526985685298-1 + COUNT 2
+> XRANGE writers (1526985685298-0 + COUNT 2
1) 1) 1526985691746-0
2) 1) "name"
2) "Toni"
@@ -139,6 +146,37 @@ The command `XREAD` is also able to iterate the stream. The command `XREVRANGE`
can iterate the stream reverse, from higher IDs (or times) to lower IDs (or
times).
+### Iterating with earlier versions of Redis
+
+While exclusive range intervals are only available from Redis 6.2, it is still
+possible to use a similar stream iteration pattern with earlier versions. You
+start fetching from the stream the same way as described above to obtain the
+first entries.
+
+For the subsequent calls, you'll need to programmatically advance the last
+entry's ID returned. Most Redis clients should abstract this detail, but the
+implementation can also be in the application if needed. In the example above,
+this means incrementing the sequence of `1526985685298-0` by one, from 0 to 1.
+The second call would, therefore, be:
+
+```
+> XRANGE writers 1526985685298-1 + COUNT 2
+1) 1) 1526985691746-0
+ 2) 1) "name"
+ 2) "Toni"
+...
+```
+
+Also, note that once the sequence part of the last ID equals
+18446744073709551615, you'll need to increment the timestamp and reset the
+sequence part to 0. For example, incrementing the ID
+`1526985685298-18446744073709551615` should result in `1526985685299-0`.
+
+A symmetrical pattern applies to iterating the stream with `XREVRANGE`. The only
+difference is that the client needs to decrement the ID for the subsequent
+calls. When decrementing an ID with a sequence part of 0, the timestamp needs to
+be decremented by 1 and the sequence set to 18446744073709551615.
+
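
The ID arithmetic described above is easy to get wrong at the sequence boundary, so here is a small self-contained Python sketch of the increment/decrement helpers a pre-6.2 client would need (purely illustrative):

```python
MAX_SEQ = 18446744073709551615  # 2**64 - 1, the largest sequence number


def next_stream_id(last_id: str) -> str:
    """Advance an ID for forward (XRANGE) iteration without exclusive ranges."""
    ms, seq = (int(part) for part in last_id.split("-"))
    if seq == MAX_SEQ:  # sequence overflow: bump the timestamp, reset the sequence
        return f"{ms + 1}-0"
    return f"{ms}-{seq + 1}"


def prev_stream_id(last_id: str) -> str:
    """Decrement an ID for reverse (XREVRANGE) iteration."""
    ms, seq = (int(part) for part in last_id.split("-"))
    if seq == 0:  # sequence underflow: decrement the timestamp, max out the sequence
        return f"{ms - 1}-{MAX_SEQ}"
    return f"{ms}-{seq - 1}"


assert next_stream_id("1526985685298-0") == "1526985685298-1"
assert next_stream_id("1526985685298-18446744073709551615") == "1526985685299-0"
assert prev_stream_id("1526985712947-0") == "1526985712946-18446744073709551615"
```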
## Fetching single items
If you look for an `XGET` command you'll be disappointed because `XRANGE` is
@@ -170,6 +208,10 @@ returned entries are complete, that means that the ID and all the fields they
are composed are returned. Moreover, the entries are returned with their fields
and values in the exact same order as `XADD` added them.
+@history
+
+- `>= 6.2`: Added exclusive ranges.
+
@examples
```cli
diff --git a/iredis/data/commands/xreadgroup.md b/iredis/data/commands/xreadgroup.md
index fb0b21c..f9201e4 100644
--- a/iredis/data/commands/xreadgroup.md
+++ b/iredis/data/commands/xreadgroup.md
@@ -29,7 +29,7 @@ the history of messages that were delivered to it, so a message has just a
single owner. However there is a special feature called _message claiming_ that
allows other consumers to claim messages in case there is a non recoverable
failure of some consumer. In order to implement such semantics, consumer groups
-require explicit acknowledgement of the messages successfully processed by the
+require explicit acknowledgment of the messages successfully processed by the
consumer, via the `XACK` command. This is needed because the stream will track,
for each consumer group, who is processing what message.
@@ -88,7 +88,7 @@ no differences in this regard.
Two things:
1. If the message was never delivered to anyone, that is, if we are talking
- about a new message, then a PEL (Pending Entry List) is created.
+ about a new message, then a PEL (Pending Entries List) is created.
2. If instead the message was already delivered to this consumer, and it is just
re-fetching the same message again, then the _last delivery counter_ is
updated to the current time, and the _number of deliveries_ is incremented by
@@ -129,3 +129,19 @@ acknowledged all the pending messages: we can start to use `>` as ID, in order
to get the new messages and rejoin the consumers that are processing new things.
To see how the command actually replies, please check the `XREAD` command page.
+
+@return
+
+@array-reply, specifically:
+
+The command returns an array of results: each element of the returned array is
+an array composed of two elements containing the key name and the entries
+reported for that key. The entries reported are full stream entries, having IDs
+and the list of all the fields and values. Field and values are guaranteed to be
+reported in the same order they were added by `XADD`.
+
+When **BLOCK** is used, on timeout a null reply is returned.
+
+Reading the [Redis Streams introduction](/topics/streams-intro) is highly
+suggested in order to understand more about the streams overall behavior and
+semantics.
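
As a rough sketch of the read/acknowledge cycle described in this page, in Python with redis-py (which exposes `xreadgroup` and `xack`); the stream, group and handler are illustrative and the consumer group is assumed to exist already:

```python
import redis

r = redis.Redis()  # assumed local instance


def handle(fields):
    print(fields)  # application-specific processing would go here


def consume(stream="mystream", group="grp1", consumer="alice"):
    """Read new entries for this consumer and acknowledge them (sketch)."""
    while True:
        # '>' asks only for entries never delivered to another consumer;
        # block for up to 5 seconds, after which the reply is empty (null on the wire).
        reply = r.xreadgroup(group, consumer, {stream: ">"}, count=10, block=5000)
        if not reply:
            continue
        for _key, entries in reply:
            for entry_id, fields in entries:
                handle(fields)
                r.xack(stream, group, entry_id)
```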
diff --git a/iredis/data/commands/xrevrange.md b/iredis/data/commands/xrevrange.md
index 35d7438..e7c317d 100644
--- a/iredis/data/commands/xrevrange.md
+++ b/iredis/data/commands/xrevrange.md
@@ -14,54 +14,6 @@ send:
XREVRANGE somestream + - COUNT 1
-## Iterating with XREVRANGE
-
-Like `XRANGE` this command can be used in order to iterate the whole stream
-content, however note that in this case, the next command calls should use the
-ID of the last entry, with the sequence number decremented by one. However if
-the sequence number is already 0, the time part of the ID should be decremented
-by 1, and the sequence part should be set to the maximum possible sequence
-number, that is, 18446744073709551615, or could be omitted at all, and the
-command will automatically assume it to be such a number (see `XRANGE` for more
-info about incomplete IDs).
-
-Example:
-
-```
-> XREVRANGE writers + - COUNT 2
-1) 1) 1526985723355-0
- 2) 1) "name"
- 2) "Ngozi"
- 3) "surname"
- 4) "Adichie"
-2) 1) 1526985712947-0
- 2) 1) "name"
- 2) "Agatha"
- 3) "surname"
- 4) "Christie"
-```
-
-The last ID returned is `1526985712947-0`, since the sequence number is already
-zero, the next ID I'll use instead of the `+` special ID will be
-`1526985712946-18446744073709551615`, or just `18446744073709551615`:
-
-```
-> XREVRANGE writers 1526985712946-18446744073709551615 - COUNT 2
-1) 1) 1526985691746-0
- 2) 1) "name"
- 2) "Toni"
- 3) "surname"
- 4) "Morrison"
-2) 1) 1526985685298-0
- 2) 1) "name"
- 2) "Jane"
- 3) "surname"
- 4) "Austen"
-```
-
-And so for until the iteration is complete and no result is returned. See the
-`XRANGE` page about iterating for more information.
-
@return
@array-reply, specifically:
@@ -72,6 +24,10 @@ means that the ID and all the fields they are composed are returned. Moreover
the entries are returned with their fields and values in the exact same order as
`XADD` added them.
+@history
+
+- `>= 6.2`: Added exclusive ranges.
+
@examples
```cli
diff --git a/iredis/data/commands/xtrim.md b/iredis/data/commands/xtrim.md
index 090650b..0903442 100644
--- a/iredis/data/commands/xtrim.md
+++ b/iredis/data/commands/xtrim.md
@@ -1,34 +1,71 @@
-`XTRIM` trims the stream to a given number of items, evicting older items (items
-with lower IDs) if needed. The command is conceived to accept multiple trimming
-strategies, however currently only a single one is implemented, which is
-`MAXLEN`, and works exactly as the `MAXLEN` option in `XADD`.
+`XTRIM` trims the stream by evicting older entries (entries with lower IDs) if
+needed.
-For example the following command will trim the stream to exactly the latest
-1000 items:
+Trimming the stream can be done using one of these strategies:
+
+- `MAXLEN`: Evicts entries as long as the stream's length exceeds the specified
+ `threshold`, where `threshold` is a positive integer.
+- `MINID`: Evicts entries with IDs lower than `threshold`, where `threshold` is
+ a stream ID.
+
+For example, this will trim the stream to exactly the latest 1000 items:
```
XTRIM mystream MAXLEN 1000
```
-It is possible to give the command in the following special form in order to
-make it more efficient:
+Whereas in this example, all entries that have an ID lower than 649085820-0 will
+be evicted:
+
+```
+XTRIM mystream MINID 649085820
+```
+
+By default, or when provided with the optional `=` argument, the command
+performs exact trimming.
+
+Depending on the strategy, exact trimming means:
+
+- `MAXLEN`: the trimmed stream's length will be exactly the minimum between its
+ original length and the specified `threshold`.
+- `MINID`: the oldest ID in the stream will be exactly the minimum between its
+ original oldest ID and the specified `threshold`.
+
+## Nearly exact trimming
+
+Because exact trimming may require additional effort from the Redis server, the
+optional `~` argument can be provided to make it more efficient.
+
+For example:
```
XTRIM mystream MAXLEN ~ 1000
```
-The `~` argument between the **MAXLEN** option and the actual count means that
-the user is not really requesting that the stream length is exactly 1000 items,
-but instead it could be a few tens of entries more, but never less than 1000
-items. When this option modifier is used, the trimming is performed only when
-Redis is able to remove a whole macro node. This makes it much more efficient,
-and it is usually what you want.
+The `~` argument between the `MAXLEN` strategy and the `threshold` means that
+the user is requesting to trim the stream so its length is **at least** the
+`threshold`, but possibly slightly more. In this case, Redis will stop trimming
+early when performance can be gained (for example, when a whole macro node in
+the data structure can't be removed). This makes trimming much more efficient,
+and it is usually what you want, although after trimming, the stream may have a
+few tens of additional entries over the `threshold`.
+
+Another way to control the amount of work done by the command when using
+`~` is the `LIMIT` clause. When used, it specifies the maximum `count` of
+entries that will be evicted. When `LIMIT` and `count` aren't specified, the
+default value of 100 \* the number of entries in a macro node will be implicitly
+used as the `count`. Specifying the value 0 as `count` disables the limiting
+mechanism entirely.
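
A brief Python sketch of the `MINID` strategy combined with `LIMIT` (redis-py raw command form, since these options are 6.2-specific; the threshold is taken from the example above):

```python
import redis

r = redis.Redis()  # assumed local instance

# Evict entries with IDs below the threshold, removing at most 200 per call.
# LIMIT requires the `~` (nearly exact) modifier.
removed = r.execute_command(
    "XTRIM", "mystream", "MINID", "~", 649085820, "LIMIT", 200
)
print(removed)  # number of entries actually deleted in this call
```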
@return
-@integer-reply, specifically:
+@integer-reply: The number of entries deleted from the stream.
+
+@history
+
+- `>= 6.2`: Added the `MINID` trimming strategy and the `LIMIT` option.
-The command returns the number of entries deleted from the stream.
+@examples
```cli
XADD mystream * field1 A field2 B field3 C field4 D
diff --git a/iredis/data/commands/zadd.md b/iredis/data/commands/zadd.md
index 589eaf3..f14b085 100644
--- a/iredis/data/commands/zadd.md
+++ b/iredis/data/commands/zadd.md
@@ -10,13 +10,17 @@ not hold a sorted set, an error is returned.
The score values should be the string representation of a double precision
floating point number. `+inf` and `-inf` values are valid values as well.
-## ZADD options (Redis 3.0.2 or greater)
+## ZADD options
ZADD supports a list of options, specified after the name of the key and before
the first score argument. Options are:
-- **XX**: Only update elements that already exist. Never add elements.
-- **NX**: Don't update already existing elements. Always add new elements.
+- **XX**: Only update elements that already exist. Don't add new elements.
+- **NX**: Only add new elements. Don't update already existing elements.
+- **LT**: Only update existing elements if the new score is **less than** the
+ current score. This flag doesn't prevent adding new elements.
+- **GT**: Only update existing elements if the new score is **greater than** the
+ current score. This flag doesn't prevent adding new elements.
- **CH**: Modify the return value from the number of new elements added, to the
total number of elements changed (CH is an abbreviation of _changed_). Changed
elements are **new elements added** and elements already existing for which
@@ -26,6 +30,8 @@ the first score argument. Options are:
- **INCR**: When this option is specified `ZADD` acts like `ZINCRBY`. Only one
score-element pair can be specified in this mode.
+Note: The **GT**, **LT** and **NX** options are mutually exclusive.
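
A short Python sketch of the `GT`/`LT` behavior (redis-py; the raw command form is used because these flags are 6.2 additions, so helper support depends on the client version):

```python
import redis

r = redis.Redis()  # assumed local instance

r.zadd("scores", {"alice": 10})

# GT: only update if the new score is greater than the current one.
# CH makes the reply count changed elements instead of added ones.
print(r.execute_command("ZADD", "scores", "GT", "CH", 5, "alice"))   # 0, 10 is kept
print(r.execute_command("ZADD", "scores", "GT", "CH", 15, "alice"))  # 1, now 15
print(r.zscore("scores", "alice"))                                   # 15.0
```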
+
## Range of integer scores that can be expressed precisely
Redis sorted sets use a _double 64-bit floating point number_ to represent the
@@ -74,8 +80,10 @@ is also possible to query sorted sets by range of scores using `ZRANGEBYSCORE`).
@integer-reply, specifically:
-- The number of elements added to the sorted set, not including elements already
- existing for which the score was updated.
+- When used without optional arguments, the number of elements added to the
+ sorted set (excluding score updates).
+- If the `CH` option is specified, the number of elements that were changed
+ (added or updated).
If the `INCR` option is specified, the return value will be @bulk-string-reply:
@@ -87,6 +95,8 @@ If the `INCR` option is specified, the return value will be @bulk-string-reply:
- `>= 2.4`: Accepts multiple elements. In Redis versions older than 2.4 it was
possible to add or update a single member per call.
+- `>= 3.0.2`: Added the `XX`, `NX`, `CH` and `INCR` options.
+- `>= 6.2`: Added the `GT` and `LT` options.
@examples
diff --git a/iredis/data/commands/zdiff.md b/iredis/data/commands/zdiff.md
new file mode 100644
index 0000000..1b587b9
--- /dev/null
+++ b/iredis/data/commands/zdiff.md
@@ -0,0 +1,19 @@
+This command is similar to `ZDIFFSTORE`, but instead of storing the resulting
+sorted set, it is returned to the client.
+
+@return
+
+@array-reply: the result of the difference (optionally with their scores, in
+case the `WITHSCORES` option is given).
+
+@examples
+
+```cli
+ZADD zset1 1 "one"
+ZADD zset1 2 "two"
+ZADD zset1 3 "three"
+ZADD zset2 1 "one"
+ZADD zset2 2 "two"
+ZDIFF 2 zset1 zset2
+ZDIFF 2 zset1 zset2 WITHSCORES
+```
diff --git a/iredis/data/commands/zdiffstore.md b/iredis/data/commands/zdiffstore.md
new file mode 100644
index 0000000..abe3ba7
--- /dev/null
+++ b/iredis/data/commands/zdiffstore.md
@@ -0,0 +1,24 @@
+Computes the difference between the first and all successive input sorted sets
+and stores the result in `destination`. The total number of input keys is
+specified by `numkeys`.
+
+Keys that do not exist are considered to be empty sets.
+
+If `destination` already exists, it is overwritten.
+
+@return
+
+@integer-reply: the number of elements in the resulting sorted set at
+`destination`.
+
+@examples
+
+```cli
+ZADD zset1 1 "one"
+ZADD zset1 2 "two"
+ZADD zset1 3 "three"
+ZADD zset2 1 "one"
+ZADD zset2 2 "two"
+ZDIFFSTORE out 2 zset1 zset2
+ZRANGE out 0 -1 WITHSCORES
+```
diff --git a/iredis/data/commands/zinter.md b/iredis/data/commands/zinter.md
new file mode 100644
index 0000000..297c912
--- /dev/null
+++ b/iredis/data/commands/zinter.md
@@ -0,0 +1,21 @@
+This command is similar to `ZINTERSTORE`, but instead of storing the resulting
+sorted set, it is returned to the client.
+
+For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`.
+
+@return
+
+@array-reply: the result of intersection (optionally with their scores, in case
+the `WITHSCORES` option is given).
+
+@examples
+
+```cli
+ZADD zset1 1 "one"
+ZADD zset1 2 "two"
+ZADD zset2 1 "one"
+ZADD zset2 2 "two"
+ZADD zset2 3 "three"
+ZINTER 2 zset1 zset2
+ZINTER 2 zset1 zset2 WITHSCORES
+```
diff --git a/iredis/data/commands/zmscore.md b/iredis/data/commands/zmscore.md
new file mode 100644
index 0000000..69818fb
--- /dev/null
+++ b/iredis/data/commands/zmscore.md
@@ -0,0 +1,18 @@
+Returns the scores associated with the specified `members` in the sorted set
+stored at `key`.
+
+For every `member` that does not exist in the sorted set, a `nil` value is
+returned.
+
+@return
+
+@array-reply: list of scores or `nil` associated with the specified `member`
+values (double precision floating point numbers), represented as strings.
+
+@examples
+
+```cli
+ZADD myzset 1 "one"
+ZADD myzset 2 "two"
+ZMSCORE myzset "one" "two" "nofield"
+```
diff --git a/iredis/data/commands/zrandmember.md b/iredis/data/commands/zrandmember.md
new file mode 100644
index 0000000..fba72d3
--- /dev/null
+++ b/iredis/data/commands/zrandmember.md
@@ -0,0 +1,50 @@
+When called with just the `key` argument, return a random element from the
+sorted set value stored at `key`.
+
+If the provided `count` argument is positive, return an array of **distinct
+elements**. The array's length is either `count` or the sorted set's cardinality
+(`ZCARD`), whichever is lower.
+
+If called with a negative `count`, the behavior changes and the command is
+allowed to return the **same element multiple times**. In this case, the number
+of returned elements is the absolute value of the specified `count`.
+
+The optional `WITHSCORES` modifier changes the reply so it includes the
+respective scores of the randomly selected elements from the sorted set.
+
+@return
+
+@bulk-string-reply: without the additional `count` argument, the command returns
+a Bulk Reply with the randomly selected element, or `nil` when `key` does not
+exist.
+
+@array-reply: when the additional `count` argument is passed, the command
+returns an array of elements, or an empty array when `key` does not exist. If
+the `WITHSCORES` modifier is used, the reply is a list of elements and their
+scores from the sorted set.
+
+@examples
+
+```cli
+ZADD dadi 1 uno 2 due 3 tre 4 quattro 5 cinque 6 sei
+ZRANDMEMBER dadi
+ZRANDMEMBER dadi
+ZRANDMEMBER dadi -5 WITHSCORES
+```
+
+## Specification of the behavior when count is passed
+
+When the `count` argument is a positive value this command behaves as follows:
+
+- No repeated elements are returned.
+- If `count` is bigger than the cardinality of the sorted set, the command will
+ only return the whole sorted set without additional elements.
+- The order of elements in the reply is not truly random, so it is up to the
+ client to shuffle them if needed.
+
+When the `count` is a negative value, the behavior changes as follows:
+
+- Repeating elements are possible.
+- Exactly `count` elements, or an empty array if the sorted set is empty
+ (non-existing key), are always returned.
+- The order of elements in the reply is truly random.
diff --git a/iredis/data/commands/zrange.md b/iredis/data/commands/zrange.md
index e2b1c5b..f079b61 100644
--- a/iredis/data/commands/zrange.md
+++ b/iredis/data/commands/zrange.md
@@ -1,34 +1,137 @@
-Returns the specified range of elements in the sorted set stored at `key`. The
-elements are considered to be ordered from the lowest to the highest score.
-Lexicographical order is used for elements with equal score.
+Returns the specified range of elements in the sorted set stored at `<key>`.
-See `ZREVRANGE` when you need the elements ordered from highest to lowest score
-(and descending lexicographical order for elements with equal score).
+`ZRANGE` can perform different types of range queries: by index (rank), by the
+score, or by lexicographical order.
-Both `start` and `stop` are zero-based indexes, where `0` is the first element,
-`1` is the next element and so on. They can also be negative numbers indicating
-offsets from the end of the sorted set, with `-1` being the last element of the
-sorted set, `-2` the penultimate element and so on.
+Starting with Redis 6.2.0, this command can replace the following commands:
+`ZREVRANGE`, `ZRANGEBYSCORE`, `ZREVRANGEBYSCORE`, `ZRANGEBYLEX` and
+`ZREVRANGEBYLEX`.
-`start` and `stop` are **inclusive ranges**, so for example `ZRANGE myzset 0 1`
-will return both the first and the second element of the sorted set.
+## Common behavior and options
-Out of range indexes will not produce an error. If `start` is larger than the
-largest index in the sorted set, or `start > stop`, an empty list is returned.
-If `stop` is larger than the end of the sorted set Redis will treat it like it
-is the last element of the sorted set.
+The order of elements is from the lowest to the highest score. Elements with the
+same score are ordered lexicographically.
-It is possible to pass the `WITHSCORES` option in order to return the scores of
-the elements together with the elements. The returned list will contain
+The optional `REV` argument reverses the ordering, so elements are ordered from
+highest to lowest score, and score ties are resolved by reverse lexicographical
+ordering.
+
+The optional `LIMIT` argument can be used to obtain a sub-range from the
+matching elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative
+`<count>` returns all elements from the `<offset>`. Keep in mind that if
+`<offset>` is large, the sorted set needs to be traversed for `<offset>`
+elements before getting to the elements to return, which can add up to O(N) time
+complexity.
+
+The optional `WITHSCORES` argument supplements the command's reply with the
+scores of elements returned. The returned list contains
`value1,score1,...,valueN,scoreN` instead of `value1,...,valueN`. Client
libraries are free to return a more appropriate data type (suggestion: an array
with (value, score) arrays/tuples).
+## Index ranges
+
+By default, the command performs an index range query. The `<min>` and `<max>`
+arguments represent zero-based indexes, where `0` is the first element, `1` is
+the next element, and so on. These arguments specify an **inclusive range**, so
+for example, `ZRANGE myzset 0 1` will return both the first and the second
+element of the sorted set.
+
+The indexes can also be negative numbers indicating offsets from the end of the
+sorted set, with `-1` being the last element of the sorted set, `-2` the
+penultimate element, and so on.
+
+Out of range indexes do not produce an error.
+
+If `<min>` is greater than either the end index of the sorted set or `<max>`, an
+empty list is returned.
+
+If `<max>` is greater than the end index of the sorted set, Redis will use the
+last element of the sorted set.
+
+## Score ranges
+
+When the `BYSCORE` option is provided, the command behaves like `ZRANGEBYSCORE`
+and returns the range of elements from the sorted set having scores equal or
+between `<min>` and `<max>`.
+
+`<min>` and `<max>` can be `-inf` and `+inf`, denoting the negative and positive
+infinities, respectively. This means that you are not required to know the
+highest or lowest score in the sorted set to get all elements from or up to a
+certain score.
+
+By default, the score intervals specified by `<min>` and `<max>` are closed
+(inclusive). It is possible to specify an open interval (exclusive) by prefixing
+the score with the character `(`.
+
+For example:
+
+```
+ZRANGE zset (1 5 BYSCORE
+```
+
+Will return all elements with `1 < score <= 5` while:
+
+```
+ZRANGE zset (5 (10 BYSCORE
+```
+
+Will return all the elements with `5 < score < 10` (5 and 10 excluded).
+
+## Lexicographical ranges
+
+When the `BYLEX` option is used, the command behaves like `ZRANGEBYLEX` and
+returns the range of elements from the sorted set between the `<min>` and
+`<max>` lexicographical closed range intervals.
+
+Note that lexicographical ordering relies on all elements having the same score.
+The reply is unspecified when the elements have different scores.
+
+Valid `<min>` and `<max>` must start with `(` or `[`, in order to specify
+whether the range interval is exclusive or inclusive, respectively.
+
+The special values of `+` or `-` for `<min>` and `<max>` mean positive and
+negative infinite strings, respectively, so for instance the command **ZRANGE
+myzset - + BYLEX** is guaranteed to return all the elements in the sorted set,
+provided that all the elements have the same score.
+
+### Lexicographical comparison of strings
+
+Strings are compared as a binary array of bytes. Because of how the ASCII
+character set is specified, this means that usually this also have the effect of
+comparing normal ASCII characters in an obvious dictionary way. However, this is
+not true if non-plain ASCII strings are used (for example, utf8 strings).
+
+However, the user can apply a transformation to the encoded string so that the
+first part of the element inserted in the sorted set will compare as the user
+requires for the specific application. For example, if I want to add strings
+that will be compared in a case-insensitive way, but I still want to retrieve
+the real case when querying, I can add strings in the following way:
+
+ ZADD autocomplete 0 foo:Foo 0 bar:BAR 0 zap:zap
+
+Because of the first _normalized_ part in every element (before the colon
+character), we are forcing a given comparison. However, after the range is
+queried using `ZRANGE ... BYLEX`, the application can display to the user the
+second part of the string, after the colon.
+
+The binary nature of the comparison allows using sorted sets as a general
+purpose index, for example, the first part of the element can be a 64-bit
+big-endian number. Since big-endian numbers have the most significant bytes in
+the initial positions, the binary comparison will match the numerical comparison
+of the numbers. This can be used in order to implement range queries on 64-bit
+values. As in the example below, after the first 8 bytes, we can store the value
+of the element we are indexing.
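
To make the 64-bit big-endian indexing idea concrete, here is a hedged Python sketch (redis-py; `ZRANGEBYLEX` is used for brevity, and on Redis 6.2 `ZRANGE key min max BYLEX` is equivalent; the key and values are made up):

```python
import struct
import redis

r = redis.Redis()  # assumed local instance


def index_member(value: int, payload: str) -> bytes:
    # 8-byte big-endian prefix: byte-wise order equals numerical order of `value`.
    return struct.pack(">Q", value) + b":" + payload.encode()


r.zadd("idx", {index_member(1000, "a"): 0,
               index_member(2000, "b"): 0,
               index_member(3000, "c"): 0})

# Lexicographical range over the numeric interval 1500..2500.
lo = b"[" + struct.pack(">Q", 1500)
hi = b"[" + struct.pack(">Q", 2500)
print(r.zrangebylex("idx", lo, hi))  # only the member whose prefix encodes 2000
```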
+
@return
@array-reply: list of elements in the specified range (optionally with their
scores, in case the `WITHSCORES` option is given).
+@history
+
+- `>= 6.2`: Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options.
+
@examples
```cli
@@ -47,3 +150,10 @@ _score_2_, ..., _element_N_, _score_N_.
```cli
ZRANGE myzset 0 1 WITHSCORES
```
+
+This example shows how to query the sorted set by score, excluding the value `1`
+and up to infinity, returning only the second element of the result:
+
+```cli
+ZRANGE myzset (1 +inf BYSCORE LIMIT 1 1
+```
diff --git a/iredis/data/commands/zrangebylex.md b/iredis/data/commands/zrangebylex.md
index ab387bd..55f3002 100644
--- a/iredis/data/commands/zrangebylex.md
+++ b/iredis/data/commands/zrangebylex.md
@@ -9,6 +9,9 @@ The elements are considered to be ordered from lower to higher strings as
compared byte-by-byte using the `memcmp()` C function. Longer strings are
considered greater than shorter strings if the common part is identical.
+As of Redis 6.2.0, this command is considered deprecated. Please prefer using
+the `ZRANGE` command with the `BYLEX` argument in new code.
+
The optional `LIMIT` argument can be used to only get a range of the matching
elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative `count`
returns all elements from the `offset`. Keep in mind that if `offset` is large,
diff --git a/iredis/data/commands/zrangebyscore.md b/iredis/data/commands/zrangebyscore.md
index f440e0e..e102eed 100644
--- a/iredis/data/commands/zrangebyscore.md
+++ b/iredis/data/commands/zrangebyscore.md
@@ -6,6 +6,9 @@ The elements having the same score are returned in lexicographical order (this
follows from a property of the sorted set implementation in Redis and does not
involve further computation).
+As of Redis 6.2.0, this command is considered deprecated. Please prefer using
+the `ZRANGE` command with the `BYSCORE` argument in new code.
+
The optional `LIMIT` argument can be used to only get a range of the matching
elements (similar to _SELECT LIMIT offset, count_ in SQL). A negative `count`
returns all elements from the `offset`. Keep in mind that if `offset` is large,
diff --git a/iredis/data/commands/zrangestore.md b/iredis/data/commands/zrangestore.md
new file mode 100644
index 0000000..ba2b805
--- /dev/null
+++ b/iredis/data/commands/zrangestore.md
@@ -0,0 +1,14 @@
+This command is like `ZRANGE`, but stores the result in the `<dst>` destination
+key.
+
+@return
+
+@integer-reply: the number of elements in the resulting sorted set.
+
+@examples
+
+```cli
+ZADD srczset 1 "one" 2 "two" 3 "three" 4 "four"
+ZRANGESTORE dstzset srczset 2 -1
+ZRANGE dstzset 0 -1
+```
diff --git a/iredis/data/commands/zremrangebylex.md b/iredis/data/commands/zremrangebylex.md
index ceaf69f..cc2cf30 100644
--- a/iredis/data/commands/zremrangebylex.md
+++ b/iredis/data/commands/zremrangebylex.md
@@ -4,7 +4,7 @@ sorted set stored at `key` between the lexicographical range specified by `min`
and `max`.
The meaning of `min` and `max` are the same of the `ZRANGEBYLEX` command.
-Similarly, this command actually returns the same elements that `ZRANGEBYLEX`
+Similarly, this command actually removes the same elements that `ZRANGEBYLEX`
would return if called with the same `min` and `max` arguments.
@return
diff --git a/iredis/data/commands/zrevrange.md b/iredis/data/commands/zrevrange.md
index c9f6c4d..a7a667c 100644
--- a/iredis/data/commands/zrevrange.md
+++ b/iredis/data/commands/zrevrange.md
@@ -4,6 +4,9 @@ Descending lexicographical order is used for elements with equal score.
Apart from the reversed ordering, `ZREVRANGE` is similar to `ZRANGE`.
+As of Redis 6.2.0, this command is considered deprecated. Please prefer using
+the `ZRANGE` command with the `REV` argument in new code.
+
@return
@array-reply: list of elements in the specified range (optionally with their
diff --git a/iredis/data/commands/zrevrangebylex.md b/iredis/data/commands/zrevrangebylex.md
index 831e5cd..1cd9de0 100644
--- a/iredis/data/commands/zrevrangebylex.md
+++ b/iredis/data/commands/zrevrangebylex.md
@@ -4,6 +4,9 @@ sorted set at `key` with a value between `max` and `min`.
Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to `ZRANGEBYLEX`.
+As of Redis 6.2.0, this command is considered deprecated. Please prefer using
+the `ZRANGE` command with the `BYLEX` and `REV` arguments in new code.
+
@return
@array-reply: list of elements in the specified score range.
diff --git a/iredis/data/commands/zrevrangebyscore.md b/iredis/data/commands/zrevrangebyscore.md
index c16d8b4..d41652c 100644
--- a/iredis/data/commands/zrevrangebyscore.md
+++ b/iredis/data/commands/zrevrangebyscore.md
@@ -9,6 +9,9 @@ order.
Apart from the reversed ordering, `ZREVRANGEBYSCORE` is similar to
`ZRANGEBYSCORE`.
+As of Redis 6.2.0, this command is considered deprecated. Please prefer using
+the `ZRANGE` command with the `BYSCORE` and `REV` arguments in new code.
+
@return
@array-reply: list of elements in the specified score range (optionally with
diff --git a/iredis/data/commands/zunion.md b/iredis/data/commands/zunion.md
new file mode 100644
index 0000000..71f737b
--- /dev/null
+++ b/iredis/data/commands/zunion.md
@@ -0,0 +1,21 @@
+This command is similar to `ZUNIONSTORE`, but instead of storing the resulting
+sorted set, it is returned to the client.
+
+For a description of the `WEIGHTS` and `AGGREGATE` options, see `ZUNIONSTORE`.
+
+@return
+
+@array-reply: the result of union (optionally with their scores, in case the
+`WITHSCORES` option is given).
+
+@examples
+
+```cli
+ZADD zset1 1 "one"
+ZADD zset1 2 "two"
+ZADD zset2 1 "one"
+ZADD zset2 2 "two"
+ZADD zset2 3 "three"
+ZUNION 2 zset1 zset2
+ZUNION 2 zset1 zset2 WITHSCORES
+```
diff --git a/iredis/entry.py b/iredis/entry.py
index 8e396a4..eb57668 100644
--- a/iredis/entry.py
+++ b/iredis/entry.py
@@ -266,6 +266,7 @@ PAGER_HELP = """Using pager when output is too tall for your window, default to
help="Config file for iredis, default is ~/.iredisrc.",
)
@click.option("--decode", default=None, help=DECODE_HELP)
+@click.option("--client_name", help="Assign a name to the current connection.")
@click.option("--raw/--no-raw", default=None, is_flag=True, help=RAW_HELP)
@click.option("--rainbow/--no-rainbow", default=None, is_flag=True, help=RAINBOW)
@click.option("--shell/--no-shell", default=None, is_flag=True, help=SHELL)
@@ -278,6 +279,7 @@ def gather_args(
p,
n,
password,
+ client_name,
newbie,
iredisrc,
decode,
@@ -319,7 +321,8 @@ def gather_args(
if not sys.stdout.isatty():
config.raw = True
- config.newbie_mode = newbie
+ if newbie is not None:
+ config.newbie_mode = newbie
if decode is not None:
config.decode = decode
@@ -366,6 +369,7 @@ def create_client(params):
port = params["p"]
db = params["n"]
password = params["password"]
+ client_name = params["client_name"]
dsn_from_url = None
dsn = params["dsn"]
@@ -385,10 +389,19 @@ def create_client(params):
path=dsn_from_url.path,
scheme=dsn_from_url.scheme,
username=dsn_from_url.username,
+ client_name=client_name,
)
if params["socket"]:
- return Client(scheme="unix", path=params["socket"], db=db, password=password)
- return Client(host=host, port=port, db=db, password=password)
+ return Client(
+ scheme="unix",
+ path=params["socket"],
+ db=db,
+ password=password,
+ client_name=client_name,
+ )
+ return Client(
+ host=host, port=port, db=db, password=password, client_name=client_name
+ )
def main():
diff --git a/iredis/redis_grammar.py b/iredis/redis_grammar.py
index f03459f..e680839 100644
--- a/iredis/redis_grammar.py
+++ b/iredis/redis_grammar.py
@@ -147,7 +147,7 @@ VALID_TOKEN = r"""(
)"""
PATTERN = fr"(?P<pattern>{VALID_TOKEN})"
VALID_SLOT = r"\d+" # TODO add range? max value:16384
-VALID_NODE = r"\d+"
+VALID_NODE = r"\w+"
NUM = r"\d+"
NNUM = r"-?\+?\(?\[?(\d+|inf)" # number cloud be negative
_FLOAT = r"-?(\d|\.|e)+"
diff --git a/iredis/renders.py b/iredis/renders.py
index 02470f0..2898c42 100644
--- a/iredis/renders.py
+++ b/iredis/renders.py
@@ -56,6 +56,8 @@ class OutputRender:
return value
if isinstance(value, int):
return str(value).encode()
+ if isinstance(value, str):
+ return value.encode()
if isinstance(value, list):
return _render_raw_list(value)
@@ -78,7 +80,7 @@ class OutputRender:
@staticmethod
def render_nested_pair(value):
"""
- For redis internel responses.
+ For redis internal responses.
Always decode with utf-8
Render nested list.
Items come as pairs.
@@ -169,6 +171,16 @@ class OutputRender:
return FormattedText([("class:success", text)])
@staticmethod
+ def render_help(raw):
+ """
+ render help text message.
+        the command like ``ACL HELP`` and ``MEMORY HELP``
+ will return a list of strings.
+ we render it as plain text
+ """
+ return FormattedText([("class:string", _render_raw_list(raw).decode())])
+
+ @staticmethod
def render_transaction_queue(text):
"""
Used when client session is in a transaction.
@@ -344,6 +356,8 @@ def _render_raw_list(bytes_items):
flatten_items.append(item)
elif isinstance(item, int):
flatten_items.append(str(item).encode())
+ elif isinstance(item, str):
+ flatten_items.append(item.encode())
elif isinstance(item, list):
flatten_items.append(_render_raw_list(item))
return b"\n".join(flatten_items)
@@ -387,7 +401,7 @@ def _render_scan(render_response, response):
rendered = [
("class:type", "(cursor) "),
- ("class:integer", cursor.decode()),
+ ("class:integer", cursor if isinstance(cursor, str) else cursor.decode()),
("", "\n"),
]
rendered_keys = render_response(responses)
diff --git a/iredis/utils.py b/iredis/utils.py
index b11097d..b64f874 100644
--- a/iredis/utils.py
+++ b/iredis/utils.py
@@ -40,7 +40,7 @@ def literal_bytes(b):
return b
-def _valide_token(words):
+def _valid_token(words):
token = "".join(words).strip()
if token:
yield token
@@ -104,7 +104,10 @@ def parse_argument_to_formatted_text(
result = []
if isinstance(name, str):
_type = type_convert.get(_type, _type)
- result.append((f"class:{style_class}.{_type}", " " + name))
+ if is_option:
+ result.append((f"class:{style_class}.{_type}", f" [{name}]"))
+ else:
+ result.append((f"class:{style_class}.{_type}", f" {name}"))
elif isinstance(name, list):
for inner_name, inner_type in zip(name, _type):
inner_type = type_convert.get(inner_type, inner_type)
@@ -190,7 +193,7 @@ def _literal_bytes(b):
backslash.
"hello\" -> \"hello\\\"
- we don't add outter double quotes here, since
+ we don't add outer double quotes here, since
completer also need this function's return value
to patch completers.
@@ -233,14 +236,14 @@ def double_quotes(unquoted):
"""
Display String like redis-cli.
escape inner double quotes.
- add outter double quotes.
+ add outer double quotes.
:param unquoted: list, or str
"""
if isinstance(unquoted, str):
# escape double quote
escaped = unquoted.replace('"', '\\"')
- return f'"{escaped}"' # add outter double quotes
+ return f'"{escaped}"' # add outer double quotes
elif isinstance(unquoted, list):
return [double_quotes(item) for item in unquoted]
diff --git a/poetry.lock b/poetry.lock
index 62763af..109b298 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,276 +1,283 @@
[[package]]
-category = "dev"
-description = "Atomic file writes."
-marker = "sys_platform == \"win32\""
name = "atomicwrites"
+version = "1.4.0"
+description = "Atomic file writes."
+category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "1.4.0"
[[package]]
-category = "dev"
-description = "Classes Without Boilerplate"
name = "attrs"
+version = "21.2.0"
+description = "Classes Without Boilerplate"
+category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "19.3.0"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[package.extras]
-azure-pipelines = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "pytest-azurepipelines"]
-dev = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "pre-commit"]
-docs = ["sphinx", "zope.interface"]
-tests = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"]
+dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit"]
+docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
+tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface"]
+tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins"]
[[package]]
-category = "main"
-description = "Composable command line interface toolkit"
name = "click"
+version = "7.1.2"
+description = "Composable command line interface toolkit"
+category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-version = "7.1.2"
[[package]]
-category = "dev"
-description = "Cross-platform colored terminal text."
-marker = "sys_platform == \"win32\""
name = "colorama"
+version = "0.4.4"
+description = "Cross-platform colored terminal text."
+category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-version = "0.4.3"
[[package]]
-category = "main"
-description = "Config file reading, writing and validation."
name = "configobj"
+version = "5.0.6"
+description = "Config file reading, writing and validation."
+category = "main"
optional = false
python-versions = "*"
-version = "5.0.6"
[package.dependencies]
six = "*"
[[package]]
-category = "dev"
-description = "Read metadata from Python packages"
-marker = "python_version < \"3.8\""
name = "importlib-metadata"
+version = "4.5.0"
+description = "Read metadata from Python packages"
+category = "dev"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
-version = "1.6.1"
+python-versions = ">=3.6"
[package.dependencies]
+typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"
[package.extras]
-docs = ["sphinx", "rst.linker"]
-testing = ["packaging", "pep517", "importlib-resources (>=1.3)"]
+docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
+testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"]
[[package]]
-category = "main"
-description = "Read resources from Python packages"
name = "importlib-resources"
+version = "5.1.4"
+description = "Read resources from Python packages"
+category = "main"
optional = false
-python-versions = ">=2.7,!=3.0,!=3.1,!=3.2,!=3.3"
-version = "1.0.2"
+python-versions = ">=3.6"
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
+testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
[[package]]
-category = "main"
-description = "The fastest markdown parser in pure Python"
name = "mistune"
+version = "0.8.4"
+description = "The fastest markdown parser in pure Python"
+category = "main"
optional = false
python-versions = "*"
-version = "0.8.4"
[[package]]
-category = "dev"
-description = "More routines for operating on iterables, beyond itertools"
name = "more-itertools"
+version = "8.8.0"
+description = "More routines for operating on iterables, beyond itertools"
+category = "dev"
optional = false
python-versions = ">=3.5"
-version = "8.4.0"
[[package]]
-category = "dev"
-description = "Core utilities for Python packages"
name = "packaging"
+version = "20.9"
+description = "Core utilities for Python packages"
+category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "20.4"
[package.dependencies]
pyparsing = ">=2.0.2"
-six = "*"
[[package]]
-category = "main"
-description = "Python datetimes made easy"
name = "pendulum"
+version = "2.1.2"
+description = "Python datetimes made easy"
+category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-version = "2.1.0"
[package.dependencies]
python-dateutil = ">=2.6,<3.0"
-pytzdata = ">=2018.3"
+pytzdata = ">=2020.1"
[[package]]
-category = "dev"
-description = "Pexpect allows easy control of interactive console applications."
name = "pexpect"
+version = "4.8.0"
+description = "Pexpect allows easy control of interactive console applications."
+category = "dev"
optional = false
python-versions = "*"
-version = "4.8.0"
[package.dependencies]
ptyprocess = ">=0.5"
[[package]]
-category = "dev"
-description = "plugin and hook calling mechanisms for python"
name = "pluggy"
+version = "0.13.1"
+description = "plugin and hook calling mechanisms for python"
+category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "0.13.1"
[package.dependencies]
-[package.dependencies.importlib-metadata]
-python = "<3.8"
-version = ">=0.12"
+importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
[package.extras]
dev = ["pre-commit", "tox"]
[[package]]
-category = "main"
-description = "Library for building powerful interactive command lines in Python"
name = "prompt-toolkit"
+version = "3.0.3"
+description = "Library for building powerful interactive command lines in Python"
+category = "main"
optional = false
python-versions = ">=3.6"
-version = "3.0.3"
[package.dependencies]
wcwidth = "*"
[[package]]
-category = "dev"
-description = "Run a subprocess in a pseudo terminal"
name = "ptyprocess"
+version = "0.7.0"
+description = "Run a subprocess in a pseudo terminal"
+category = "dev"
optional = false
python-versions = "*"
-version = "0.6.0"
[[package]]
-category = "dev"
-description = "library with cross-python path, ini-parsing, io, code, log facilities"
name = "py"
+version = "1.10.0"
+description = "library with cross-python path, ini-parsing, io, code, log facilities"
+category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "1.8.2"
[[package]]
-category = "main"
-description = "Pygments is a syntax highlighting package written in Python."
name = "pygments"
+version = "2.9.0"
+description = "Pygments is a syntax highlighting package written in Python."
+category = "main"
optional = false
python-versions = ">=3.5"
-version = "2.6.1"
[[package]]
-category = "dev"
-description = "Python parsing module"
name = "pyparsing"
+version = "2.4.7"
+description = "Python parsing module"
+category = "dev"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
-version = "2.4.7"
[[package]]
-category = "dev"
-description = "pytest: simple powerful testing with Python"
name = "pytest"
+version = "5.4.3"
+description = "pytest: simple powerful testing with Python"
+category = "dev"
optional = false
python-versions = ">=3.5"
-version = "5.4.3"
[package.dependencies]
-atomicwrites = ">=1.0"
+atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""}
attrs = ">=17.4.0"
-colorama = "*"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
more-itertools = ">=4.0.0"
packaging = "*"
pluggy = ">=0.12,<1.0"
py = ">=1.5.0"
wcwidth = "*"
-[package.dependencies.importlib-metadata]
-python = "<3.8"
-version = ">=0.12"
-
[package.extras]
-checkqa-mypy = ["mypy (v0.761)"]
+checkqa-mypy = ["mypy (==v0.761)"]
testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"]
[[package]]
-category = "main"
-description = "Extensions to the standard Python datetime module"
name = "python-dateutil"
+version = "2.8.1"
+description = "Extensions to the standard Python datetime module"
+category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-version = "2.8.1"
[package.dependencies]
six = ">=1.5"
[[package]]
-category = "main"
-description = "The Olson timezone database for Python."
name = "pytzdata"
+version = "2020.1"
+description = "The Olson timezone database for Python."
+category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "2019.3"
[[package]]
-category = "main"
-description = "Python client for Redis key-value store"
name = "redis"
+version = "3.5.3"
+description = "Python client for Redis key-value store"
+category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-version = "3.5.3"
[package.extras]
hiredis = ["hiredis (>=0.1.3)"]
[[package]]
-category = "main"
-description = "Python 2 and 3 compatibility utilities"
name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
-version = "1.15.0"
[[package]]
-category = "main"
-description = "Measures number of Terminal column cells of wide-character codes"
-name = "wcwidth"
+name = "typing-extensions"
+version = "3.10.0.0"
+description = "Backported and Experimental Type Hints for Python 3.5+"
+category = "dev"
optional = false
python-versions = "*"
+
+[[package]]
+name = "wcwidth"
version = "0.1.9"
+description = "Measures number of Terminal column cells of wide-character codes"
+category = "main"
+optional = false
+python-versions = "*"
[[package]]
-category = "dev"
-description = "Backport of pathlib-compatible object wrapper for zip files"
-marker = "python_version < \"3.8\""
name = "zipp"
+version = "3.4.1"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+category = "main"
optional = false
python-versions = ">=3.6"
-version = "3.1.0"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"]
-testing = ["jaraco.itertools", "func-timeout"]
+docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
+testing = ["pytest (>=4.6)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-cov", "pytest-enabler", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
[metadata]
-content-hash = "50616ff2ae7105090d8555ffaae4bceb8cfc92d7fddb70635202a5eb8b4cf701"
+lock-version = "1.1"
python-versions = "^3.6"
+content-hash = "2eaf01b87306c3d08c959827415e6075bf632ab4af523a9a3c10c53c9d54d18f"
[metadata.files]
atomicwrites = [
@@ -278,60 +285,62 @@ atomicwrites = [
{file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"},
]
attrs = [
- {file = "attrs-19.3.0-py2.py3-none-any.whl", hash = "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c"},
- {file = "attrs-19.3.0.tar.gz", hash = "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"},
+ {file = "attrs-21.2.0-py2.py3-none-any.whl", hash = "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1"},
+ {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"},
]
click = [
{file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"},
{file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"},
]
colorama = [
- {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"},
- {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"},
+ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
+ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
]
configobj = [
{file = "configobj-5.0.6.tar.gz", hash = "sha256:a2f5650770e1c87fb335af19a9b7eb73fc05ccf22144eb68db7d00cd2bcb0902"},
]
importlib-metadata = [
- {file = "importlib_metadata-1.6.1-py2.py3-none-any.whl", hash = "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"},
- {file = "importlib_metadata-1.6.1.tar.gz", hash = "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545"},
+ {file = "importlib_metadata-4.5.0-py3-none-any.whl", hash = "sha256:833b26fb89d5de469b24a390e9df088d4e52e4ba33b01dc5e0e4f41b81a16c00"},
+ {file = "importlib_metadata-4.5.0.tar.gz", hash = "sha256:b142cc1dd1342f31ff04bb7d022492b09920cb64fed867cd3ea6f80fe3ebd139"},
]
importlib-resources = [
- {file = "importlib_resources-1.0.2-py2.py3-none-any.whl", hash = "sha256:6e2783b2538bd5a14678284a3962b0660c715e5a0f10243fd5e00a4b5974f50b"},
- {file = "importlib_resources-1.0.2.tar.gz", hash = "sha256:d3279fd0f6f847cced9f7acc19bd3e5df54d34f93a2e7bb5f238f81545787078"},
+ {file = "importlib_resources-5.1.4-py3-none-any.whl", hash = "sha256:e962bff7440364183203d179d7ae9ad90cb1f2b74dcb84300e88ecc42dca3351"},
+ {file = "importlib_resources-5.1.4.tar.gz", hash = "sha256:54161657e8ffc76596c4ede7080ca68cb02962a2e074a2586b695a93a925d36e"},
]
mistune = [
{file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"},
{file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"},
]
more-itertools = [
- {file = "more-itertools-8.4.0.tar.gz", hash = "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5"},
- {file = "more_itertools-8.4.0-py3-none-any.whl", hash = "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2"},
+ {file = "more-itertools-8.8.0.tar.gz", hash = "sha256:83f0308e05477c68f56ea3a888172c78ed5d5b3c282addb67508e7ba6c8f813a"},
+ {file = "more_itertools-8.8.0-py3-none-any.whl", hash = "sha256:2cf89ec599962f2ddc4d568a05defc40e0a587fbc10d5989713638864c36be4d"},
]
packaging = [
- {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"},
- {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"},
+ {file = "packaging-20.9-py2.py3-none-any.whl", hash = "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"},
+ {file = "packaging-20.9.tar.gz", hash = "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5"},
]
pendulum = [
- {file = "pendulum-2.1.0-cp27-cp27m-macosx_10_13_x86_64.whl", hash = "sha256:9eda38ff65b1f297d860d3f562480e048673fb4b81fdd5c8c55decb519b97ed2"},
- {file = "pendulum-2.1.0-cp27-cp27m-win_amd64.whl", hash = "sha256:70007aebc4494163f8705909a1996ce21ab853801b57fba4c2dd53c3df5c38f0"},
- {file = "pendulum-2.1.0-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:575934b65b298eeb99c5a5b1673c945fc5c99e2b56caff772a91bc4b1eba7b82"},
- {file = "pendulum-2.1.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:d42d1e870541eeaf3fe0500aac0c76a85bd4bd53ebed74f9a7daf8f01ac77374"},
- {file = "pendulum-2.1.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:ff7f3420de0c0cf21c1fc813d581fcfa4a1fb6d87f09485880b3e1204eb9cdd7"},
- {file = "pendulum-2.1.0-cp35-cp35m-win_amd64.whl", hash = "sha256:ac3c6a992beeb4c9bd90c317a1bb2a6cba159b49a49b6dd3c86b5bacb86f3d50"},
- {file = "pendulum-2.1.0-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:75a62e3f98499283fafe8ef4b44f81052e84825b00a0b64609dd8a06985382b9"},
- {file = "pendulum-2.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a79a72a7fd1092a7c69ddd8580a0be5365ded40c9f9c865623c7665742e3b888"},
- {file = "pendulum-2.1.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:701127e1f0ff7c253cc0c07f29becc5f9210547914e0bbe59ffd9fa064d7c3c8"},
- {file = "pendulum-2.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:816e01dcb0ba4ffcf2ceaafe4d644174fea680361e909f6f8ba0a4fdb2ccae24"},
- {file = "pendulum-2.1.0-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:edd00e6b43698762e10bfda508cc9c06bad88c0703a9b37e412aec1189e06e23"},
- {file = "pendulum-2.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4420e058110740a8193fb0709350dfc6ac790a99c345fc4e92e24df0f834ddcb"},
- {file = "pendulum-2.1.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:aa560bd39d94f3889646422f1e65b8dfd025bf6288d43e5c2e31d4f972aaf2e4"},
- {file = "pendulum-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2788945a0111d5325fd27ae3e3b18b741e440d20bdb7d4ea22fce7c9a4fbbf40"},
- {file = "pendulum-2.1.0-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:eb7e349bb2d1b2b418d094e2179d6768561e8242fd8cb640b5aaba735f3e91d1"},
- {file = "pendulum-2.1.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6cf0f876cd088ee1578266f4231121376747aa90c3ed3b8e212a8344a9920061"},
- {file = "pendulum-2.1.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:aa13ddea12fd871d3191f633f08090b91ea2e80fb0ed50a7a149add7f680b12d"},
- {file = "pendulum-2.1.0-cp38-cp38m-win_amd64.whl", hash = "sha256:0cbbd4f30c69a283690d9ed8e58e44a990e067e59ee05b5ef55d022b38659aeb"},
- {file = "pendulum-2.1.0.tar.gz", hash = "sha256:093cab342e10516660e64b935a6da1a043e0286de36cc229fb48471415981ffe"},
+ {file = "pendulum-2.1.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:b6c352f4bd32dff1ea7066bd31ad0f71f8d8100b9ff709fb343f3b86cee43efe"},
+ {file = "pendulum-2.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:318f72f62e8e23cd6660dbafe1e346950281a9aed144b5c596b2ddabc1d19739"},
+ {file = "pendulum-2.1.2-cp35-cp35m-macosx_10_15_x86_64.whl", hash = "sha256:0731f0c661a3cb779d398803655494893c9f581f6488048b3fb629c2342b5394"},
+ {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3481fad1dc3f6f6738bd575a951d3c15d4b4ce7c82dce37cf8ac1483fde6e8b0"},
+ {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9702069c694306297ed362ce7e3c1ef8404ac8ede39f9b28b7c1a7ad8c3959e3"},
+ {file = "pendulum-2.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fb53ffa0085002ddd43b6ca61a7b34f2d4d7c3ed66f931fe599e1a531b42af9b"},
+ {file = "pendulum-2.1.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:c501749fdd3d6f9e726086bf0cd4437281ed47e7bca132ddb522f86a1645d360"},
+ {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c807a578a532eeb226150d5006f156632df2cc8c5693d778324b43ff8c515dd0"},
+ {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2d1619a721df661e506eff8db8614016f0720ac171fe80dda1333ee44e684087"},
+ {file = "pendulum-2.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f888f2d2909a414680a29ae74d0592758f2b9fcdee3549887779cd4055e975db"},
+ {file = "pendulum-2.1.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e95d329384717c7bf627bf27e204bc3b15c8238fa8d9d9781d93712776c14002"},
+ {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4c9c689747f39d0d02a9f94fcee737b34a5773803a64a5fdb046ee9cac7442c5"},
+ {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1245cd0075a3c6d889f581f6325dd8404aca5884dea7223a5566c38aab94642b"},
+ {file = "pendulum-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:db0a40d8bcd27b4fb46676e8eb3c732c67a5a5e6bfab8927028224fbced0b40b"},
+ {file = "pendulum-2.1.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f5e236e7730cab1644e1b87aca3d2ff3e375a608542e90fe25685dae46310116"},
+ {file = "pendulum-2.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:de42ea3e2943171a9e95141f2eecf972480636e8e484ccffaf1e833929e9e052"},
+ {file = "pendulum-2.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7c5ec650cb4bec4c63a89a0242cc8c3cebcec92fcfe937c417ba18277d8560be"},
+ {file = "pendulum-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:33fb61601083f3eb1d15edeb45274f73c63b3c44a8524703dc143f4212bf3269"},
+ {file = "pendulum-2.1.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:29c40a6f2942376185728c9a0347d7c0f07905638c83007e1d262781f1e6953a"},
+ {file = "pendulum-2.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:94b1fc947bfe38579b28e1cccb36f7e28a15e841f30384b5ad6c5e31055c85d7"},
+ {file = "pendulum-2.1.2.tar.gz", hash = "sha256:b06a0ca1bfe41c990bbf0c029f0b6501a7f2ec4e38bfec730712015e8860f207"},
]
pexpect = [
{file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
@@ -346,16 +355,16 @@ prompt-toolkit = [
{file = "prompt_toolkit-3.0.3.tar.gz", hash = "sha256:a402e9bf468b63314e37460b68ba68243d55b2f8c4d0192f85a019af3945050e"},
]
ptyprocess = [
- {file = "ptyprocess-0.6.0-py2.py3-none-any.whl", hash = "sha256:d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"},
- {file = "ptyprocess-0.6.0.tar.gz", hash = "sha256:923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0"},
+ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
+ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
]
py = [
- {file = "py-1.8.2-py2.py3-none-any.whl", hash = "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44"},
- {file = "py-1.8.2.tar.gz", hash = "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b"},
+ {file = "py-1.10.0-py2.py3-none-any.whl", hash = "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"},
+ {file = "py-1.10.0.tar.gz", hash = "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3"},
]
pygments = [
- {file = "Pygments-2.6.1-py2.py3-none-any.whl", hash = "sha256:aa931c0bd5daa25c475afadb2147115134cfe501f0656828cbe7cb566c7123bc"},
- {file = "Pygments-2.6.1.tar.gz", hash = "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44"},
+ {file = "Pygments-2.9.0-py3-none-any.whl", hash = "sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e"},
+ {file = "Pygments-2.9.0.tar.gz", hash = "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f"},
]
pyparsing = [
{file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"},
@@ -370,22 +379,27 @@ python-dateutil = [
{file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"},
]
pytzdata = [
- {file = "pytzdata-2019.3-py2.py3-none-any.whl", hash = "sha256:84c52b9a47d097fcd483f047a544979de6c3a86e94c845e3569e9f8acd0fa071"},
- {file = "pytzdata-2019.3.tar.gz", hash = "sha256:fac06f7cdfa903188dc4848c655e4adaee67ee0f2fe08e7daf815cf2a761ee5e"},
+ {file = "pytzdata-2020.1-py2.py3-none-any.whl", hash = "sha256:e1e14750bcf95016381e4d472bad004eef710f2d6417240904070b3d6654485f"},
+ {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"},
]
redis = [
{file = "redis-3.5.3-py2.py3-none-any.whl", hash = "sha256:432b788c4530cfe16d8d943a09d40ca6c16149727e4afe8c2c9d5580c59d9f24"},
{file = "redis-3.5.3.tar.gz", hash = "sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2"},
]
six = [
- {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"},
- {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"},
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+typing-extensions = [
+ {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"},
+ {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"},
+ {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"},
]
wcwidth = [
{file = "wcwidth-0.1.9-py2.py3-none-any.whl", hash = "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1"},
{file = "wcwidth-0.1.9.tar.gz", hash = "sha256:ee73862862a156bf77ff92b09034fc4825dd3af9cf81bc5b360668d425f3c5f1"},
]
zipp = [
- {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"},
- {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"},
+ {file = "zipp-3.4.1-py3-none-any.whl", hash = "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098"},
+ {file = "zipp-3.4.1.tar.gz", hash = "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76"},
]
diff --git a/pyoxidizer.template.bzl b/pyoxidizer.template.bzl
index 612ecde..ea67485 100644
--- a/pyoxidizer.template.bzl
+++ b/pyoxidizer.template.bzl
@@ -1,6 +1,7 @@
# This file defines how PyOxidizer application building and packaging is
-# performed. See the pyoxidizer crate's documentation for extensive
-# documentation on this file format.
+# performed. See PyOxidizer's documentation at
+# https://pyoxidizer.readthedocs.io/en/stable/ for details of this
+# configuration file format.
# Obtain the default PythonDistribution for our build target. We link
# this distribution into our produced executable and extract the Python
@@ -12,52 +13,244 @@ def make_dist():
# This function creates a Python executable and installs it in a destination
# directory.
def make_exe(dist):
- # This variable defines the configuration of the
- # embedded Python interpreter.
- python_config = PythonInterpreterConfig(
- run_eval="from iredis.entry import main; main()",
- # Allows the executable to load deps from this folder
- sys_paths=["$ORIGIN/lib"]
- )
+ # This function creates a `PythonPackagingPolicy` instance, which
+ # influences how executables are built and how resources are added to
+ # the executable. You can customize the default behavior by assigning
+ # to attributes and calling functions.
+ policy = dist.make_python_packaging_policy()
+
+ # Enable support for non-classified "file" resources to be added to
+ # resource collections.
+ # policy.allow_files = True
+
+ # Control support for loading Python extensions and other shared libraries
+ # from memory. This is only supported on Windows and is ignored on other
+ # platforms.
+ # policy.allow_in_memory_shared_library_loading = True
+
+ # Control whether to generate Python bytecode at various optimization
+ # levels. The default optimization level used by Python is 0.
+ # policy.bytecode_optimize_level_zero = True
+ # policy.bytecode_optimize_level_one = True
+ policy.bytecode_optimize_level_two = True
+
+ # Package all available Python extensions in the distribution.
+ policy.extension_module_filter = "all"
+
+ # Package the minimum set of Python extensions in the distribution needed
+ # to run a Python interpreter. Various functionality from the Python
+ # standard library won't work with this setting! But it can be used to
+ # reduce the size of generated executables by omitting unused extensions.
+ # policy.extension_module_filter = "minimal"
+
+ # Package Python extensions in the distribution not having additional
+ # library dependencies. This will exclude working support for SSL,
+ # compression formats, and other functionality.
+ # policy.extension_module_filter = "no-libraries"
+
+ # Package Python extensions in the distribution not having a dependency on
+ # copyleft licensed software like GPL.
+ # policy.extension_module_filter = "no-copyleft"
+
+ # Controls whether the file scanner attempts to classify files and emit
+ # resource-specific values.
+ # policy.file_scanner_classify_files = True
+
+ # Controls whether `File` instances are emitted by the file scanner.
+ # policy.file_scanner_emit_files = False
+
+ # Controls the `add_include` attribute of "classified" resources
+ # (`PythonModuleSource`, `PythonPackageResource`, etc).
+ # policy.include_classified_resources = True
+
+ # Toggle whether Python module source code for modules in the Python
+ # distribution's standard library are included.
+ policy.include_distribution_sources = True
+
+ # Toggle whether Python package resource files for the Python standard
+ # library are included.
+ policy.include_distribution_resources = False
+
+ # Controls the `add_include` attribute of `File` resources.
+ policy.include_file_resources = False
+
+ # Controls the `add_include` attribute of `PythonModuleSource` not in
+ # the standard library.
+ # policy.include_non_distribution_sources = True
+
+ # Toggle whether files associated with tests are included.
+ policy.include_test = False
+
+ # Resources are loaded from "in-memory" or "filesystem-relative" paths.
+ # The locations to attempt to add resources to are defined by the
+ # `resources_location` and `resources_location_fallback` attributes.
+ # The former is the first/primary location to try and the latter is
+ # an optional fallback.
+
+ # Use in-memory location for adding resources by default.
+ # policy.resources_location = "in-memory"
+
+ # Use filesystem-relative location for adding resources by default.
+ policy.resources_location = "filesystem-relative:lib"
+
+ # Attempt to add resources relative to the built binary when
+ # `resources_location` fails.
+ # policy.resources_location_fallback = "filesystem-relative:prefix"
+
+ # Clear out a fallback resource location.
+ # policy.resources_location_fallback = None
+
+ # Define a preferred Python extension module variant in the Python distribution
+ # to use.
+ # policy.set_preferred_extension_module_variant("foo", "bar")
+
+ # Configure policy values to classify files as typed resources.
+ # (This is the default.)
+ # policy.set_resource_handling_mode("classify")
+
+ # Configure policy values to handle files as files and not attempt
+ # to classify files as specific types.
+ # policy.set_resource_handling_mode("files")
+
+ # This variable defines the configuration of the embedded Python
+ # interpreter. By default, the interpreter will run a Python REPL
+ # using settings that are appropriate for an "isolated" run-time
+ # environment.
+ #
+ # The configuration of the embedded Python interpreter can be modified
+ # by setting attributes on the instance. Some of these are
+ # documented below.
+ python_config = dist.make_python_interpreter_config()
+
+ # Make the embedded interpreter behave like a `python` process.
+ # python_config.config_profile = "python"
+
+ # Set initial value for `sys.path`. If the string `$ORIGIN` exists in
+ # a value, it will be expanded to the directory of the built executable.
+ python_config.module_search_paths = ["$ORIGIN/lib"]
+
+ # Use jemalloc as Python's memory allocator.
+ # python_config.allocator_backend = "jemalloc"
+
+ # Use mimalloc as Python's memory allocator.
+ # python_config.allocator_backend = "mimalloc"
+
+ # Use snmalloc as Python's memory allocator.
+ # python_config.allocator_backend = "snmalloc"
+
+ # Let Python choose which memory allocator to use. (This will likely
+    # use the malloc()/free() linked into the program.)
+ python_config.allocator_backend = "default"
+
+ # Enable the use of a custom allocator backend with the "raw" memory domain.
+ # python_config.allocator_raw = True
+
+ # Enable the use of a custom allocator backend with the "mem" memory domain.
+ # python_config.allocator_mem = True
+
+ # Enable the use of a custom allocator backend with the "obj" memory domain.
+ # python_config.allocator_obj = True
+
+ # Enable the use of a custom allocator backend with pymalloc's arena
+ # allocator.
+ # python_config.allocator_pymalloc_arena = True
+
+ # Enable Python memory allocator debug hooks.
+ # python_config.allocator_debug = True
+
+ # Control whether `oxidized_importer` is the first importer on
+ # `sys.meta_path`.
+ # python_config.oxidized_importer = False
+
+ # Enable the standard path-based importer which attempts to load
+ # modules from the filesystem.
+ # python_config.filesystem_importer = True
+
+ # Set `sys.frozen = True`
+ # python_config.sys_frozen = True
+
+ # Set `sys.meipass`
+ # python_config.sys_meipass = True
+
+ # Write files containing loaded modules to the directory specified
+ # by the given environment variable.
+ # python_config.write_modules_directory_env = "/tmp/oxidized/loaded_modules"
+
+ # Evaluate a string as Python code when the interpreter starts.
+ python_config.run_command = "from iredis.entry import main; main()"
+
+ # Run a Python module as __main__ when the interpreter starts.
+ # python_config.run_module = "<module>"
+
+ # Run a Python file when the interpreter starts.
+ # python_config.run_filename = "/path/to/file"
# Produce a PythonExecutable from a Python distribution, embedded
# resources, and other options. The returned object represents the
# standalone executable that will be built.
exe = dist.to_python_executable(
name="iredis",
+
+        # If no argument is passed, the default `PythonPackagingPolicy` for the
+ # distribution is used.
+ packaging_policy=policy,
+
+        # If no argument is passed, the default `PythonInterpreterConfig` is used.
config=python_config,
- # Embed all extension modules, making this a fully-featured Python.
- extension_module_filter='all',
+ )
- # Only package the minimal set of extension modules needed to initialize
- # a Python interpreter. Many common packages in Python's standard
- # library won't work with this setting.
- #extension_module_filter='minimal',
+ # Install tcl/tk support files to a specified directory so the `tkinter` Python
+ # module works.
+ # exe.tcl_files_path = "lib"
- # Only package extension modules that don't require linking against
- # non-Python libraries. e.g. will exclude support for OpenSSL, SQLite3,
- # other features that require external libraries.
- #extension_module_filter='no-libraries',
+ # Never attempt to copy Windows runtime DLLs next to the built executable.
+ # exe.windows_runtime_dlls_mode = "never"
- # Only package extension modules that don't link against GPL licensed
- # libraries.
- #extension_module_filter='no-gpl',
+ # Copy Windows runtime DLLs next to the built executable when they can be
+ # located.
+ # exe.windows_runtime_dlls_mode = "when-present"
- # Include Python module sources. This isn't strictly required and it does
- # make binary sizes larger. But having the sources can be useful for
- # activities such as debugging.
- include_sources=True,
+    # Copy Windows runtime DLLs next to the built executable and error if this
+ # cannot be done.
+ # exe.windows_runtime_dlls_mode = "always"
- # Whether to include non-module resource data/files.
- include_resources=False,
+ # Make the executable a console application on Windows.
+ # exe.windows_subsystem = "console"
- # Do not include functionality for testing Python itself.
- include_test=False,
- )
+ # Make the executable a non-console application on Windows.
+ # exe.windows_subsystem = "windows"
+
+    # Invoke `pip download` to fetch a single package as wheel archives.
+    # `pip_download()` returns objects representing
+ # collected files inside Python wheels. `add_python_resources()` adds these
+ # objects to the binary, with a load location as defined by the packaging
+ # policy's resource location attributes.
+ #exe.add_python_resources(exe.pip_download(["pyflakes==2.2.0"]))
+
+ # Invoke `pip install` with our Python distribution to install a single package.
+ # `pip_install()` returns objects representing installed files.
+ # `add_python_resources()` adds these objects to the binary, with a load
+ # location as defined by the packaging policy's resource location
+ # attributes.
+ exe.add_python_resources(exe.pip_install(["$WHEEL_PATH"]))
+
+ # Invoke `pip install` using a requirements file and add the collected resources
+ # to our binary.
+ #exe.add_python_resources(exe.pip_install(["-r", "requirements.txt"]))
+
+
+
+ # Read Python files from a local directory and add them to our embedded
+ # context, taking just the resources belonging to the `foo` and `bar`
+ # Python packages.
+ #exe.add_python_resources(exe.read_package_root(
+ # path="/src/mypackage",
+ # packages=["foo", "bar"],
+ #))
# Discover Python files from a virtualenv and add them to our embedded
# context.
- #exe.add_python_resources(dist.read_virtualenv(path="/path/to/venv"))
+ #exe.add_python_resources(exe.read_virtualenv(path="/path/to/venv"))
# Filter all resources collected so far through a filter of names
# in a file.
@@ -67,27 +260,75 @@ def make_exe(dist):
# referenced by other consumers of this target.
return exe
-def make_embedded_data(exe):
- return exe.to_embedded_data()
+def make_embedded_resources(exe):
+ return exe.to_embedded_resources()
-def make_install(dist, exe):
+def make_install(exe):
# Create an object that represents our installed application file layout.
files = FileManifest()
# Add the generated executable to our install layout in the root directory.
files.add_python_resource(".", exe)
- # Include pip dependencies alongside the executable
- # WHEEL_PATH will be replaced with envsubst because pyoxidizer doesn't support env vars
- files.add_python_resources("lib", dist.pip_install(["$WHEEL_PATH"]))
-
return files
+def make_msi(exe):
+ # See the full docs for more. But this will convert your Python executable
+ # into a `WiXMSIBuilder` Starlark type, which will be converted to a Windows
+ # .msi installer when it is built.
+ return exe.to_wix_msi_builder(
+ # Simple identifier of your app.
+ "iredis",
+ # The name of your application.
+ "iredis",
+ # The version of your application.
+ "1.9.1",
+ # The author/manufacturer of your application.
+ "laixintao"
+ )
+
+
+# Dynamically enable automatic code signing.
+def register_code_signers():
+ # You will need to run with `pyoxidizer build --var ENABLE_CODE_SIGNING 1` for
+ # this if block to be evaluated.
+ if not VARS.get("ENABLE_CODE_SIGNING"):
+ return
+
+ # Use a code signing certificate in a .pfx/.p12 file, prompting the
+ # user for its path and password to open.
+ # pfx_path = prompt_input("path to code signing certificate file")
+ # pfx_password = prompt_password(
+ # "password for code signing certificate file",
+ # confirm = True
+ # )
+ # signer = code_signer_from_pfx_file(pfx_path, pfx_password)
+
+ # Use a code signing certificate in the Windows certificate store, specified
+ # by its SHA-1 thumbprint. (This allows you to use YubiKeys and other
+ # hardware tokens if they speak to the Windows certificate APIs.)
+ # sha1_thumbprint = prompt_input(
+ # "SHA-1 thumbprint of code signing certificate in Windows store"
+ # )
+ # signer = code_signer_from_windows_store_sha1_thumbprint(sha1_thumbprint)
+
+ # Choose a code signing certificate automatically from the Windows
+ # certificate store.
+ # signer = code_signer_from_windows_store_auto()
+
+ # Activate your signer so it gets called automatically.
+ # signer.activate()
+
+
+# Call our function to set up automatic code signers.
+register_code_signers()
+
# Tell PyOxidizer about the build targets defined above.
register_target("dist", make_dist)
-register_target("exe", make_exe, depends=["dist"], default=True)
-register_target("embedded", make_embedded_data, depends=["exe"], default_build_script=True)
-register_target("install", make_install, depends=["dist", "exe"])
+register_target("exe", make_exe, depends=["dist"])
+register_target("resources", make_embedded_resources, depends=["exe"], default_build_script=True)
+register_target("install", make_install, depends=["exe"], default=True)
+register_target("msi_installer", make_msi, depends=["exe"])
# Resolve whatever targets the invoker of this configuration file is requesting
# be resolved.
@@ -98,5 +339,5 @@ resolve_targets()
# Everything below this is typically managed by PyOxidizer and doesn't need
# to be updated by people.
-PYOXIDIZER_VERSION = "0.6.0"
-PYOXIDIZER_COMMIT = ""
+PYOXIDIZER_VERSION = "0.14.1"
+PYOXIDIZER_COMMIT = "UNKNOWN"
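Stripped of the explanatory comments, the new-style configuration that the template switches to above boils down to the following minimal sketch (Starlark, which uses Python syntax). Every setting shown here is taken from the active lines in the diff; the $WHEEL_PATH placeholder is still substituted externally (with envsubst) before the build, as the removed comment in make_install() noted.

    def make_exe(dist):
        policy = dist.make_python_packaging_policy()
        policy.bytecode_optimize_level_two = True
        policy.extension_module_filter = "all"
        policy.include_distribution_sources = True
        policy.include_distribution_resources = False
        policy.include_file_resources = False
        policy.include_test = False
        policy.resources_location = "filesystem-relative:lib"

        python_config = dist.make_python_interpreter_config()
        python_config.module_search_paths = ["$ORIGIN/lib"]
        python_config.allocator_backend = "default"
        python_config.run_command = "from iredis.entry import main; main()"

        exe = dist.to_python_executable(
            name="iredis",
            packaging_policy=policy,
            config=python_config,
        )
        # pip dependencies are now added by the executable target itself;
        # $WHEEL_PATH is replaced with envsubst before pyoxidizer runs.
        exe.add_python_resources(exe.pip_install(["$WHEEL_PATH"]))
        return exe

With the register_target() changes at the bottom of the file, `install` (rather than `exe`) becomes the default target, and the new `msi_installer` target wraps the executable in a WiX MSI builder via `exe.to_wix_msi_builder(...)`.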
diff --git a/pyproject.toml b/pyproject.toml
index 76a0b7b..aba1fba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "iredis"
-version = "1.9.1"
+version = "1.9.4"
description = "Terminal client for Redis with auto-completion and syntax highlighting."
authors = ["laixintao <laixintao1995@163.com>"]
readme = 'README.md'
@@ -18,6 +18,7 @@ classifiers = [
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Database",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
@@ -37,7 +38,7 @@ mistune = "^0.8"
configobj = "^5.0"
click = "^7.0"
pendulum = "^2.0"
-importlib-resources = "1.0.2"
+importlib-resources = "^5.1.0"
# wcwidth 0.2.x uses pkg_resources which is not supported by PyOxidizer
wcwidth = "0.1.9"
diff --git a/tests/cli_tests/__init__.py b/tests/cli_tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/cli_tests/__init__.py
diff --git a/tests/cli_tests/test_cli_start.py b/tests/cli_tests/test_cli_start.py
index d33185a..00fcbf7 100644
--- a/tests/cli_tests/test_cli_start.py
+++ b/tests/cli_tests/test_cli_start.py
@@ -10,6 +10,14 @@ def test_start_on_connection_error():
cli.close()
+def test_start_with_client_name():
+ cli = pexpect.spawn("iredis --client_name custom_name", timeout=2)
+ cli.expect("iredis")
+ cli.sendline("CLIENT GETNAME")
+ cli.expect("custom_name")
+ cli.close()
+
+
def test_short_help_option(config):
c = pexpect.spawn("iredis -h", timeout=2)
diff --git a/tests/cli_tests/test_command_input.py b/tests/cli_tests/test_command_input.py
index 4f970f5..f70ee3c 100644
--- a/tests/cli_tests/test_command_input.py
+++ b/tests/cli_tests/test_command_input.py
@@ -1,3 +1,4 @@
+import os
import pytest
@@ -8,8 +9,13 @@ def test_wrong_select_db_index(cli):
cli.sendline("select 128")
cli.expect(["DB index is out of range", "127.0.0.1:6379[1]>"])
+ if int(os.environ["REDIS_VERSION"]) > 5:
+ text = "value is not an integer or out of range"
+ else:
+ text = "invalid DB index"
+
cli.sendline("select abc")
- cli.expect(["invalid DB index", "127.0.0.1:6379[1]>"])
+ cli.expect([text, "127.0.0.1:6379[1]>"])
cli.sendline("select 15")
cli.expect("OK")
@@ -53,3 +59,16 @@ def test_auth_hidden_password(clean_redis, cli):
def test_hello_command_is_not_supported(cli):
cli.sendline("hello 3")
cli.expect("IRedis currently not support RESP3")
+
+
+def test_abort_reading_connection(cli):
+ cli.sendline("blpop mylist 30")
+ cli.send(chr(3))
+ cli.expect(
+ r"KeyboardInterrupt received! User canceled reading response!", timeout=10
+ )
+
+ cli.sendline("set foo bar")
+ cli.expect("OK")
+ cli.sendline("get foo")
+ cli.expect("bar")
diff --git a/tests/cli_tests/test_shell_pipeline.py b/tests/cli_tests/test_shell_pipeline.py
index 4bacf8d..8fe5e14 100644
--- a/tests/cli_tests/test_shell_pipeline.py
+++ b/tests/cli_tests/test_shell_pipeline.py
@@ -9,3 +9,13 @@ def test_running_disable_shell_pipeline():
cli.sendline("get foo | grep w")
cli.expect(r"hello")
cli.close()
+
+
+def test_running_disable_shell_pipeline_with_decode_option():
+ cli = pexpect.spawn("iredis -n 15 --decode=utf-8", timeout=2)
+ cli.expect("127.0.0.1")
+ cli.sendline("set foo hello")
+ cli.expect("OK")
+ cli.sendline("get foo | cat")
+ cli.expect(r"hello")
+ cli.close()
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 0000000..8cf2069
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,11 @@
+import re
+
+
+def formatted_text_rematch(value_to_test, expected_formatted_text):
+ """
+    The text part of each ``expected_formatted_text`` fragment can be a regex.
+ """
+ for value, expected in zip(value_to_test, expected_formatted_text):
+ assert value[0] == expected[0]
+ print(expected[1], value[1])
+ assert re.match(expected[1], value[1])
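For context, a hypothetical illustration of this helper: the style part of each (style, text) fragment is compared exactly, while the text part of the expected fragment is treated as a regular expression passed to re.match(). The literal and regex fragments below both appear in the test_client.py changes later in this diff, which is where the helper is exercised.

    from ..helpers import formatted_text_rematch  # same relative import as in test_client.py below

    actual = [
        ("class:dockey", "key: "),
        ("", "string (embstr) mem: 50 bytes, ttl: -1"),
    ]
    expected = [
        ("class:dockey", "key: "),
        ("", r"string \(embstr\) mem: \d+ bytes, ttl: -1"),
    ]
    formatted_text_rematch(actual, expected)  # passes: styles are equal, texts match the regexes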
diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/unittests/__init__.py
diff --git a/tests/unittests/command_parse/test_cluster.py b/tests/unittests/command_parse/test_cluster.py
index e60fe92..6dd877f 100644
--- a/tests/unittests/command_parse/test_cluster.py
+++ b/tests/unittests/command_parse/test_cluster.py
@@ -26,9 +26,15 @@ def test_command_cluster_count_failure_reports(judge_command):
)
judge_command("cluster count-failure-reports 1 2 3 4", None)
judge_command("cluster count-failure-reports 1 a", None)
- judge_command("cluster count-failure-reports a", None)
+ judge_command(
+ "cluster count-failure-reports a",
+ {"command": "cluster count-failure-reports", "node": "a"},
+ )
judge_command("cluster count-failure-reports a 2", None)
- judge_command("cluster count-failure-reports abc", None)
+ judge_command(
+ "cluster count-failure-reports abc",
+ {"command": "cluster count-failure-reports", "node": "abc"},
+ )
def test_command_cluster_countkeysinslot(judge_command):
@@ -88,9 +94,22 @@ def test_command_cluster_forget(judge_command):
)
judge_command("cluster forget 1 2 3 4", None)
judge_command("cluster forget 1 a", None)
- judge_command("cluster forget a", None)
+ judge_command("cluster forget a", {"command": "cluster forget", "node": "a"})
judge_command("cluster forget a 2", None)
- judge_command("cluster forget abc", None)
+ judge_command(
+ "cluster forget abc",
+ {
+ "command": "cluster forget",
+ "node": "abc",
+ },
+ )
+ judge_command(
+ "cluster forget 07c37dfeb235213a872192d90877d0cd55635b91",
+ {
+ "command": "cluster forget",
+ "node": "07c37dfeb235213a872192d90877d0cd55635b91",
+ },
+ )
def test_command_cluster_getkeysinslot(judge_command):
@@ -209,6 +228,15 @@ def test_command_cluster_set_slot(judge_command):
},
)
judge_command(
+ "cluster setslot 123 node e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca",
+ {
+ "command": "cluster setslot",
+ "slot": "123",
+ "slotsubcmd": "node",
+ "node": "e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca",
+ },
+ )
+ judge_command(
"cluster setslot 123 MIGRATING 123123",
{
"command": "cluster setslot",
diff --git a/tests/unittests/test_client.py b/tests/unittests/test_client.py
index e78da17..5887579 100644
--- a/tests/unittests/test_client.py
+++ b/tests/unittests/test_client.py
@@ -11,6 +11,7 @@ from iredis.config import config, load_config_files
from iredis.completers import IRedisCompleter
from iredis.entry import Rainbow, prompt_message
from iredis.exceptions import NotSupport
+from ..helpers import formatted_text_rematch
@pytest.fixture
@@ -258,19 +259,15 @@ def test_peek_string(iredis_client, clean_redis):
clean_redis.set("foo", "bar")
peek_result = list(iredis_client.do_peek("foo"))
- assert peek_result == [
- FormattedText(
- [
- ("class:dockey", "key: "),
- ("", "string (embstr) mem: 50 bytes, ttl: -1"),
- ("", "\n"),
- ("class:dockey", "strlen: "),
- ("", "3"),
- ("", "\n"),
- ("class:dockey", "value: "),
- ("", '"bar"'),
- ]
- )
+ assert peek_result[0][0] == ("class:dockey", "key: ")
+ assert re.match(r"string \(embstr\) mem: \d+ bytes, ttl: -1", peek_result[0][1][1])
+ assert peek_result[0][2:] == [
+ ("", "\n"),
+ ("class:dockey", "strlen: "),
+ ("", "3"),
+ ("", "\n"),
+ ("class:dockey", "value: "),
+ ("", '"bar"'),
]
@@ -278,39 +275,40 @@ def test_peek_list_fetch_all(iredis_client, clean_redis):
clean_redis.lpush("mylist", *[f"hello-{index}" for index in range(5)])
peek_result = list(iredis_client.do_peek("mylist"))
- assert peek_result == [
+ formatted_text_rematch(
+ peek_result[0],
FormattedText(
[
("class:dockey", "key: "),
- ("", "list (quicklist) mem: 176 bytes, ttl: -1"),
+ ("", r"list \(quicklist\) mem: \d+ bytes, ttl: -1"),
("", "\n"),
("class:dockey", "llen: "),
("", "5"),
("", "\n"),
("class:dockey", "elements: "),
("", "\n"),
- ("", "1)"),
+ ("", r"1\)"),
("", " "),
("class:string", '"hello-4"'),
("", "\n"),
- ("", "2)"),
+ ("", r"2\)"),
("", " "),
("class:string", '"hello-3"'),
("", "\n"),
- ("", "3)"),
+ ("", r"3\)"),
("", " "),
("class:string", '"hello-2"'),
("", "\n"),
- ("", "4)"),
+ ("", r"4\)"),
("", " "),
("class:string", '"hello-1"'),
("", "\n"),
- ("", "5)"),
+ ("", r"5\)"),
("", " "),
("class:string", '"hello-0"'),
]
- )
- ]
+ ),
+ )
def test_peek_list_fetch_part(iredis_client, clean_redis):
@@ -338,18 +336,21 @@ def test_peek_zset_fetch_all(iredis_client, clean_redis):
"myzset", dict(zip([f"hello-{index}" for index in range(3)], range(3)))
)
peek_result = list(iredis_client.do_peek("myzset"))
- assert peek_result[0][0:9] == FormattedText(
- [
- ("class:dockey", "key: "),
- ("", "zset (ziplist) mem: 92 bytes, ttl: -1"),
- ("", "\n"),
- ("class:dockey", "zcount: "),
- ("", "3"),
- ("", "\n"),
- ("class:dockey", "members: "),
- ("", "\n"),
- ("", "1)"),
- ]
+ formatted_text_rematch(
+ peek_result[0][0:9],
+ FormattedText(
+ [
+ ("class:dockey", "key: "),
+ ("", r"zset \(ziplist\) mem: \d+ bytes, ttl: -1"),
+ ("", "\n"),
+ ("class:dockey", "zcount: "),
+ ("", "3"),
+ ("", "\n"),
+ ("class:dockey", "members: "),
+ ("", "\n"),
+ ("", r"1\)"),
+ ]
+ ),
)
@@ -358,17 +359,20 @@ def test_peek_zset_fetch_part(iredis_client, clean_redis):
"myzset", dict(zip([f"hello-{index}" for index in range(40)], range(40)))
)
peek_result = list(iredis_client.do_peek("myzset"))
- assert peek_result[0][0:8] == FormattedText(
- [
- ("class:dockey", "key: "),
- ("", "zset (ziplist) mem: 556 bytes, ttl: -1"),
- ("", "\n"),
- ("class:dockey", "zcount: "),
- ("", "40"),
- ("", "\n"),
- ("class:dockey", "members (first 40): "),
- ("", "\n"),
- ]
+ formatted_text_rematch(
+ peek_result[0][0:8],
+ FormattedText(
+ [
+ ("class:dockey", "key: "),
+ ("", r"zset \(ziplist\) mem: \d+ bytes, ttl: -1"),
+ ("", "\n"),
+ ("class:dockey", "zcount: "),
+ ("", "40"),
+ ("", "\n"),
+ ("class:dockey", r"members \(first 40\): "),
+ ("", "\n"),
+ ]
+ ),
)
@@ -395,10 +399,12 @@ def test_peek_stream(iredis_client, clean_redis):
clean_redis.xadd("mystream", {"foo": "bar", "hello": "world"})
peek_result = list(iredis_client.do_peek("mystream"))
- assert peek_result[0][0:18] == FormattedText(
+ assert peek_result[0][0] == ("class:dockey", "key: ")
+ assert re.match(
+ r"stream \((stream|unknown)\) mem: 6\d\d bytes, ttl: -1", peek_result[0][1][1]
+ )
+ assert peek_result[0][2:18] == FormattedText(
[
- ("class:dockey", "key: "),
- ("", "stream (unknown) mem: 601 bytes, ttl: -1"),
("", "\n"),
("class:dockey", "XINFO: "),
("", "\n"),
diff --git a/tests/unittests/test_render_functions.py b/tests/unittests/test_render_functions.py
index 2da9b35..30b328e 100644
--- a/tests/unittests/test_render_functions.py
+++ b/tests/unittests/test_render_functions.py
@@ -484,3 +484,9 @@ def test_render_bytes(config):
def test_render_bytes_raw(config):
assert renders.OutputRender.render_raw(b"bytes\n") == b"bytes\n"
+
+
+def test_render_help(config):
+ assert renders.OutputRender.render_help([b"foo", b"bar"]) == FormattedText(
+ [("class:string", "foo\nbar")]
+ )
diff --git a/tests/unittests/test_utils.py b/tests/unittests/test_utils.py
index c9b5eff..98ea8db 100644
--- a/tests/unittests/test_utils.py
+++ b/tests/unittests/test_utils.py
@@ -53,11 +53,11 @@ def test_timer():
(r""" "hello\"world" """, ['hello"world']),
(r"''", [""]), # set foo "" is a legal command
(r'""', [""]), # set foo "" is a legal command
- (r"\\", ["\\\\"]), # blackslash are legal
- ("\\hello\\", ["\\hello\\"]), # blackslash are legal
+        (r"\\", ["\\\\"]),  # backslashes are legal
+        ("\\hello\\", ["\\hello\\"]),  # backslashes are legal
],
)
-def test_stipe_quote_escaple_in_quote(test_input, expected):
+def test_stripe_quote_escape_in_quote(test_input, expected):
assert list(strip_quote_args(test_input)) == expected