Diffstat (limited to 'dependencies/pkg/mod/github.com/go-redis/redis')
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml | 1
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md | 49
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml | 5
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml | 10
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml | 36
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml | 11
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml | 19
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml | 17
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore | 3
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml | 4
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml | 4
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md | 177
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE | 25
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile | 35
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md | 175
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md | 15
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go | 309
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go | 412
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go | 1750
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go | 109
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go | 1283
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go | 3478
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go | 96
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go | 3475
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go | 5522
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go | 4
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go | 144
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go | 80
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go | 634
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go | 95
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go | 49
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod | 20
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum | 108
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go | 56
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go | 78
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go | 71
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go | 201
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go | 178
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go | 93
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go | 29
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go | 18
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go | 26
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go | 60
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go | 97
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go | 121
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go | 9
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go | 36
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go | 557
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go | 58
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go | 201
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go | 458
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go | 13
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go | 332
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go | 72
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go | 180
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go | 50
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go | 155
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go | 93
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go | 50
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go | 12
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go | 21
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go | 46
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go | 12
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go | 19
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go | 23
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go | 67
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go | 77
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go | 136
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go | 448
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go | 429
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go | 216
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json | 8
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go | 147
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go | 104
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go | 157
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go | 668
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go | 495
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go | 392
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go | 773
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go | 449
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go | 180
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go | 736
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go | 645
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go | 65
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh | 9
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh | 69
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh | 42
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go | 796
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go | 287
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf | 10
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go | 149
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go | 151
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go | 215
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go | 40
-rw-r--r-- dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go | 6
95 files changed, 29545 insertions, 0 deletions
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml
new file mode 100644
index 0000000..707670d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml
@@ -0,0 +1 @@
+custom: ['https://uptrace.dev/sponsor']
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..3f934f8
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,49 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+---
+
+Issue tracker is used for reporting bugs and discussing new features. Please use
+[stackoverflow](https://stackoverflow.com) for supporting issues.
+
+<!--- Provide a general summary of the issue in the Title above -->
+
+## Expected Behavior
+
+<!--- Tell us what should happen -->
+
+## Current Behavior
+
+<!--- Tell us what happens instead of the expected behavior -->
+
+## Possible Solution
+
+<!--- Not obligatory, but suggest a fix/reason for the bug, -->
+
+## Steps to Reproduce
+
+<!--- Provide a link to a live example, or an unambiguous set of steps to -->
+<!--- reproduce this bug. Include code to reproduce, if relevant -->
+
+1.
+2.
+3.
+4.
+
+## Context (Environment)
+
+<!--- How has this issue affected you? What are you trying to accomplish? -->
+<!--- Providing context helps us come up with a solution that is most useful in the real world -->
+
+<!--- Provide a general summary of the issue in the Title above -->
+
+## Detailed Description
+
+<!--- Provide a detailed description of the change or addition you are proposing -->
+
+## Possible Implementation
+
+<!--- Not obligatory, but suggest an idea for implementing addition or change -->
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..e86d7a6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Discussions
+ url: https://github.com/go-redis/redis/discussions
+ about: Ask a question via GitHub Discussions
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml
new file mode 100644
index 0000000..77b7be5
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+- package-ecosystem: gomod
+ directory: /
+ schedule:
+ interval: weekly
+- package-ecosystem: github-actions
+ directory: /
+ schedule:
+ interval: weekly
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml
new file mode 100644
index 0000000..a574e2e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml
@@ -0,0 +1,36 @@
+name: Go
+
+on:
+ push:
+ branches: [master]
+ pull_request:
+ branches: [master]
+
+jobs:
+ build:
+ name: build
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: [1.16.x, 1.17.x]
+
+ services:
+ redis:
+ image: redis
+ options: >-
+ --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
+ ports:
+ - 6379:6379
+
+ steps:
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v2
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Test
+ run: make test
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml
new file mode 100644
index 0000000..5fcfeae
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml
@@ -0,0 +1,11 @@
+name: Lint Commit Messages
+on: [pull_request]
+
+jobs:
+ commitlint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ - uses: wagoid/commitlint-github-action@v4
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..28c16c5
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml
@@ -0,0 +1,19 @@
+name: golangci-lint
+
+on:
+ push:
+ tags:
+ - v*
+ branches:
+ - master
+ - main
+ pull_request:
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v2
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml
new file mode 100644
index 0000000..685693a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml
@@ -0,0 +1,17 @@
+name: Releases
+
+on:
+ push:
+ tags:
+ - 'v*'
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: ncipollo/release-action@v1
+ with:
+ body:
+ Please refer to
+ [CHANGELOG.md](https://github.com/go-redis/redis/blob/master/CHANGELOG.md) for details
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore
new file mode 100644
index 0000000..b975a7b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore
@@ -0,0 +1,3 @@
+*.rdb
+testdata/*/
+.idea/
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml
new file mode 100644
index 0000000..de51455
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml
@@ -0,0 +1,4 @@
+run:
+ concurrency: 8
+ deadline: 5m
+ tests: false
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml
new file mode 100644
index 0000000..8b7f044
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md
new file mode 100644
index 0000000..195e519
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md
@@ -0,0 +1,177 @@
+## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
+
+
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
+
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Supports more redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+ single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+ decision:
+
+ - Traces become smaller and less noisy.
+ - It may be costly to process those 3 extra spans for each query.
+ - go-redis no longer depends on OpenTelemetry.
+
+ Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+ Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+ `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
+
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+ you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
+ struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
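+
+A minimal sketch of what such a destination struct might look like, assuming `redis` struct tags
+(the type and field names below are illustrative and not part of the original changelog):
+
+```go
+type Model struct {
+    Str string `redis:"str"`
+    Num int    `redis:"num"`
+}
+
+var data Model
+if err := rdb.HGetAll(ctx, "hash").Scan(&data); err != nil {
+    panic(err)
+}
+```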
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+ [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
+
+## v8
+
+- All commands require `context.Context` as the first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define a global package variable
+  `var ctx = context.TODO()` and use it wherever `ctx` is required.
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTelemetryHook` that adds
+  [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default, which provides better distribution. You need to move
+  existing keys to their new locations, or the keys will be inaccessible / lost. To use the old
+  hashing scheme:
+
+```go
+import "github.com/golang/groupcache/consistenthash"
+
+ring := redis.NewRing(&redis.RingOptions{
+ NewConsistentHash: func() {
+ return consistenthash.New(100, crc32.ChecksumIEEE)
+ },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
+## v7.3
+
+- New option `Options.Username` which causes the client to use `AuthACL`. Be aware of this if your
+  connection URL contains a username.
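+
+A minimal sketch of setting the new option (a hedged example; the values are placeholders and not
+part of the original changelog):
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr:     "localhost:6379",
+    Username: "app-user", // sent via the ACL-style AUTH <username> <password>
+    Password: "secret",
+})
+```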
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+ interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+ transactional pipeline.
+- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+ detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+ the time.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## v6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a
+  timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal
+  Redis servers that don't have cluster mode enabled. See
+ https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE
new file mode 100644
index 0000000..298bed9
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
new file mode 100644
index 0000000..a4cfe05
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
@@ -0,0 +1,35 @@
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+ go test ./...
+ go test ./... -short -race
+ go test ./... -run=NONE -bench=. -benchmem
+ env GOOS=linux GOARCH=386 go test ./...
+ go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
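+# Download the Redis 6.2.5 sources used by the test suite into testdata/redis.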
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
+
+fmt:
+ gofmt -w -s ./
+ goimports -w -local github.com/go-redis/redis ./
+
+go_mod_tidy:
+ go get -u && go mod tidy
+ set -e; for dir in $(PACKAGE_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u && \
+ go mod tidy); \
+ done
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md
new file mode 100644
index 0000000..f3b6a01
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md
@@ -0,0 +1,175 @@
+# Redis client for Go
+
+![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+
+go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
+OpenTelemetry and ClickHouse. Give it a star as well!
+
+## Resources
+
+- [Discussions](https://github.com/go-redis/redis/discussions)
+- [Documentation](https://redis.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
+- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling with
+ [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
+- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
+ [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
+- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
+- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
+- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
+ without using cluster mode and Redis Sentinel.
+- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
+
+## Installation
+
+go-redis supports the two latest Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
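+
+As a possible follow-up to the custom command above (a hedged sketch, not part of the original
+README), the reply of `Do` can be read with the generic `Cmd` helpers:
+
+```go
+// Read back the value written by the custom "set" above.
+val, err := rdb.Do(ctx, "get", "key").Text()
+```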
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
+
+```
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change these variables to point at your local files, or create a soft
+link to your local redis-server binary under `testdata/redis/src/` and copy the config file to
+`testdata/redis/`:
+
+```
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/go-redis/redis/graphs/contributors">
+ <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+</a>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go
new file mode 100644
index 0000000..8382806
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go
@@ -0,0 +1,309 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+var ctx = context.TODO()
+
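+// ClientStub is a Cmdable whose connections are stubbed so every read returns the canned RESP payload in resp.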
+type ClientStub struct {
+ Cmdable
+ resp []byte
+}
+
+func NewClientStub(resp []byte) *ClientStub {
+ stub := &ClientStub{
+ resp: resp,
+ }
+ stub.Cmdable = NewClient(&Options{
+ PoolSize: 128,
+ Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return stub.stubConn(), nil
+ },
+ })
+ return stub
+}
+
+func NewClusterClientStub(resp []byte) *ClientStub {
+ stub := &ClientStub{
+ resp: resp,
+ }
+
+ client := NewClusterClient(&ClusterOptions{
+ PoolSize: 128,
+ Addrs: []string{"127.0.0.1:6379"},
+ Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return stub.stubConn(), nil
+ },
+ ClusterSlots: func(_ context.Context) ([]ClusterSlot, error) {
+ return []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:6379"}},
+ },
+ }, nil
+ },
+ })
+
+ // Initialize the cluster client's command info cache using a temporary client.
+ tmpClient := NewClient(&Options{Addr: ":6379"})
+ cmdsInfo, err := tmpClient.Command(ctx).Result()
+ _ = tmpClient.Close()
+ client.cmdsInfoCache = newCmdsInfoCache(func(_ context.Context) (map[string]*CommandInfo, error) {
+ return cmdsInfo, err
+ })
+
+ stub.Cmdable = client
+ return stub
+}
+
+func (c *ClientStub) stubConn() *ConnStub {
+ return &ConnStub{
+ resp: c.resp,
+ }
+}
+
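+// ConnStub is a fake net.Conn that replays the canned RESP response on every Read and discards all writes.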
+type ConnStub struct {
+ resp []byte
+ pos int
+}
+
+func (c *ConnStub) Read(b []byte) (n int, err error) {
+ if len(c.resp) == 0 {
+ return 0, io.EOF
+ }
+
+ if c.pos >= len(c.resp) {
+ c.pos = 0
+ }
+ n = copy(b, c.resp[c.pos:])
+ c.pos += n
+ return n, nil
+}
+
+func (c *ConnStub) Write(b []byte) (n int, err error) { return len(b), nil }
+func (c *ConnStub) Close() error { return nil }
+func (c *ConnStub) LocalAddr() net.Addr { return nil }
+func (c *ConnStub) RemoteAddr() net.Addr { return nil }
+func (c *ConnStub) SetDeadline(_ time.Time) error { return nil }
+func (c *ConnStub) SetReadDeadline(_ time.Time) error { return nil }
+func (c *ConnStub) SetWriteDeadline(_ time.Time) error { return nil }
+
+type ClientStubFunc func([]byte) *ClientStub
+
+func BenchmarkDecode(b *testing.B) {
+ type Benchmark struct {
+ name string
+ stub ClientStubFunc
+ }
+
+ benchmarks := []Benchmark{
+ {"single", NewClientStub},
+ {"cluster", NewClusterClientStub},
+ }
+
+ for _, bench := range benchmarks {
+ b.Run(fmt.Sprintf("RespError-%s", bench.name), func(b *testing.B) {
+ respError(b, bench.stub)
+ })
+ b.Run(fmt.Sprintf("RespStatus-%s", bench.name), func(b *testing.B) {
+ respStatus(b, bench.stub)
+ })
+ b.Run(fmt.Sprintf("RespInt-%s", bench.name), func(b *testing.B) {
+ respInt(b, bench.stub)
+ })
+ b.Run(fmt.Sprintf("RespString-%s", bench.name), func(b *testing.B) {
+ respString(b, bench.stub)
+ })
+ b.Run(fmt.Sprintf("RespArray-%s", bench.name), func(b *testing.B) {
+ respArray(b, bench.stub)
+ })
+ b.Run(fmt.Sprintf("RespPipeline-%s", bench.name), func(b *testing.B) {
+ respPipeline(b, bench.stub)
+ })
+ b.Run(fmt.Sprintf("RespTxPipeline-%s", bench.name), func(b *testing.B) {
+ respTxPipeline(b, bench.stub)
+ })
+
+ // goroutine
+ b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=5", bench.name), func(b *testing.B) {
+ dynamicGoroutine(b, bench.stub, 5)
+ })
+ b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=20", bench.name), func(b *testing.B) {
+ dynamicGoroutine(b, bench.stub, 20)
+ })
+ b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=50", bench.name), func(b *testing.B) {
+ dynamicGoroutine(b, bench.stub, 50)
+ })
+ b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=100", bench.name), func(b *testing.B) {
+ dynamicGoroutine(b, bench.stub, 100)
+ })
+
+ b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=5", bench.name), func(b *testing.B) {
+ staticGoroutine(b, bench.stub, 5)
+ })
+ b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=20", bench.name), func(b *testing.B) {
+ staticGoroutine(b, bench.stub, 20)
+ })
+ b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=50", bench.name), func(b *testing.B) {
+ staticGoroutine(b, bench.stub, 50)
+ })
+ b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=100", bench.name), func(b *testing.B) {
+ staticGoroutine(b, bench.stub, 100)
+ })
+ }
+}
+
+func respError(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte("-ERR test error\r\n"))
+ respErr := proto.RedisError("ERR test error")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if err := rdb.Get(ctx, "key").Err(); err != respErr {
+ b.Fatalf("response error, got %q, want %q", err, respErr)
+ }
+ }
+}
+
+func respStatus(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte("+OK\r\n"))
+ var val string
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if val = rdb.Set(ctx, "key", "value", 0).Val(); val != "OK" {
+ b.Fatalf("response error, got %q, want OK", val)
+ }
+ }
+}
+
+func respInt(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte(":10\r\n"))
+ var val int64
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if val = rdb.Incr(ctx, "key").Val(); val != 10 {
+ b.Fatalf("response error, got %q, want 10", val)
+ }
+ }
+}
+
+func respString(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte("$5\r\nhello\r\n"))
+ var val string
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if val = rdb.Get(ctx, "key").Val(); val != "hello" {
+ b.Fatalf("response error, got %q, want hello", val)
+ }
+ }
+}
+
+func respArray(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte("*3\r\n$5\r\nhello\r\n:10\r\n+OK\r\n"))
+ var val []interface{}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if val = rdb.MGet(ctx, "key").Val(); len(val) != 3 {
+ b.Fatalf("response error, got len(%d), want len(3)", len(val))
+ }
+ }
+}
+
+func respPipeline(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte("+OK\r\n$5\r\nhello\r\n:1\r\n"))
+ var pipe Pipeliner
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pipe = rdb.Pipeline()
+ set := pipe.Set(ctx, "key", "value", 0)
+ get := pipe.Get(ctx, "key")
+ del := pipe.Del(ctx, "key")
+ _, err := pipe.Exec(ctx)
+ if err != nil {
+ b.Fatalf("response error, got %q, want nil", err)
+ }
+ if set.Val() != "OK" || get.Val() != "hello" || del.Val() != 1 {
+ b.Fatal("response error")
+ }
+ }
+}
+
+func respTxPipeline(b *testing.B, stub ClientStubFunc) {
+ rdb := stub([]byte("+OK\r\n+QUEUED\r\n+QUEUED\r\n+QUEUED\r\n*3\r\n+OK\r\n$5\r\nhello\r\n:1\r\n"))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var set *StatusCmd
+ var get *StringCmd
+ var del *IntCmd
+ _, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
+ set = pipe.Set(ctx, "key", "value", 0)
+ get = pipe.Get(ctx, "key")
+ del = pipe.Del(ctx, "key")
+ return nil
+ })
+ if err != nil {
+ b.Fatalf("response error, got %q, want nil", err)
+ }
+ if set.Val() != "OK" || get.Val() != "hello" || del.Val() != 1 {
+ b.Fatal("response error")
+ }
+ }
+}
+
+func dynamicGoroutine(b *testing.B, stub ClientStubFunc, concurrency int) {
+ rdb := stub([]byte("$5\r\nhello\r\n"))
+ c := make(chan struct{}, concurrency)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ c <- struct{}{}
+ go func() {
+ if val := rdb.Get(ctx, "key").Val(); val != "hello" {
+ panic(fmt.Sprintf("response error, got %q, want hello", val))
+ }
+ <-c
+ }()
+ }
+ // We no longer wait here for all goroutines to complete; it does not affect the benchmark results.
+ close(c)
+}
+
+func staticGoroutine(b *testing.B, stub ClientStubFunc, concurrency int) {
+ rdb := stub([]byte("$5\r\nhello\r\n"))
+ c := make(chan struct{}, concurrency)
+
+ b.ResetTimer()
+
+ for i := 0; i < concurrency; i++ {
+ go func() {
+ for {
+ _, ok := <-c
+ if !ok {
+ return
+ }
+ if val := rdb.Get(ctx, "key").Val(); val != "hello" {
+ panic(fmt.Sprintf("response error, got %q, want hello", val))
+ }
+ }
+ }()
+ }
+ for i := 0; i < b.N; i++ {
+ c <- struct{}{}
+ }
+ close(c)
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go
new file mode 100644
index 0000000..ba81ce8
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go
@@ -0,0 +1,412 @@
+package redis_test
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis/v8"
+)
+
+func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
+ client := redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ DialTimeout: time.Second,
+ ReadTimeout: time.Second,
+ WriteTimeout: time.Second,
+ PoolSize: poolSize,
+ })
+ if err := client.FlushDB(ctx).Err(); err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func BenchmarkRedisPing(b *testing.B) {
+ ctx := context.Background()
+ rdb := benchmarkRedisClient(ctx, 10)
+ defer rdb.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := rdb.Ping(ctx).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkSetGoroutines(b *testing.B) {
+ ctx := context.Background()
+ rdb := benchmarkRedisClient(ctx, 10)
+ defer rdb.Close()
+
+ for i := 0; i < b.N; i++ {
+ var wg sync.WaitGroup
+
+ for i := 0; i < 1000; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ err := rdb.Set(ctx, "hello", "world", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+ }()
+ }
+
+ wg.Wait()
+ }
+}
+
+func BenchmarkRedisGetNil(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Get(ctx, "key").Err(); err != redis.Nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+type setStringBenchmark struct {
+ poolSize int
+ valueSize int
+}
+
+func (bm setStringBenchmark) String() string {
+ return fmt.Sprintf("pool=%d value=%d", bm.poolSize, bm.valueSize)
+}
+
+func BenchmarkRedisSetString(b *testing.B) {
+ benchmarks := []setStringBenchmark{
+ {10, 64},
+ {10, 1024},
+ {10, 64 * 1024},
+ {10, 1024 * 1024},
+ {10, 10 * 1024 * 1024},
+
+ {100, 64},
+ {100, 1024},
+ {100, 64 * 1024},
+ {100, 1024 * 1024},
+ {100, 10 * 1024 * 1024},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.String(), func(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, bm.poolSize)
+ defer client.Close()
+
+ value := strings.Repeat("1", bm.valueSize)
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Set(ctx, "key", value, 0).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+ }
+}
+
+func BenchmarkRedisSetGetBytes(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ value := bytes.Repeat([]byte{'1'}, 10000)
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set(ctx, "key", value, 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+
+ got, err := client.Get(ctx, "key").Bytes()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if !bytes.Equal(got, value) {
+ b.Fatalf("got != value")
+ }
+ }
+ })
+}
+
+func BenchmarkRedisMGet(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ if err := client.MSet(ctx, "key1", "hello1", "key2", "hello2").Err(); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.MGet(ctx, "key1", "key2").Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkSetExpire(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set(ctx, "key", "hello", 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+ if err := client.Expire(ctx, "key", time.Second).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkPipeline(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, "key", "hello", 0)
+ pipe.Expire(ctx, "key", time.Second)
+ return nil
+ })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkZAdd(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.ZAdd(ctx, "key", &redis.Z{
+ Score: float64(1),
+ Member: "hello",
+ }).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkXRead(b *testing.B) {
+ ctx := context.Background()
+ client := benchmarkRedisClient(ctx, 10)
+ defer client.Close()
+
+ args := redis.XAddArgs{
+ Stream: "1",
+ ID: "*",
+ Values: map[string]string{"uno": "dos"},
+ }
+
+ lenStreams := 16
+ streams := make([]string, 0, lenStreams)
+ for i := 0; i < lenStreams; i++ {
+ streams = append(streams, strconv.Itoa(i))
+ }
+ for i := 0; i < lenStreams; i++ {
+ streams = append(streams, "0")
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ client.XAdd(ctx, &args)
+
+ err := client.XRead(ctx, &redis.XReadArgs{
+ Streams: streams,
+ Count: 1,
+ Block: time.Second,
+ }).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+var clientSink *redis.Client
+
+func BenchmarkWithContext(b *testing.B) {
+ ctx := context.Background()
+ rdb := benchmarkRedisClient(ctx, 10)
+ defer rdb.Close()
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ clientSink = rdb.WithContext(ctx)
+ }
+}
+
+var ringSink *redis.Ring
+
+func BenchmarkRingWithContext(b *testing.B) {
+ ctx := context.Background()
+ rdb := redis.NewRing(&redis.RingOptions{})
+ defer rdb.Close()
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ ringSink = rdb.WithContext(ctx)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+func newClusterScenario() *clusterScenario {
+ return &clusterScenario{
+ ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+ nodeIDs: make([]string, 6),
+ processes: make(map[string]*redisProcess, 6),
+ clients: make(map[string]*redis.Client, 6),
+ }
+}
+
+func BenchmarkClusterPing(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ ctx := context.Background()
+ cluster := newClusterScenario()
+ if err := startCluster(ctx, cluster); err != nil {
+ b.Fatal(err)
+ }
+ defer cluster.Close()
+
+ client := cluster.newClusterClient(ctx, redisClusterOptions())
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Ping(ctx).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkClusterDoInt(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ ctx := context.Background()
+ cluster := newClusterScenario()
+ if err := startCluster(ctx, cluster); err != nil {
+ b.Fatal(err)
+ }
+ defer cluster.Close()
+
+ client := cluster.newClusterClient(ctx, redisClusterOptions())
+ defer client.Close()
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Do(ctx, "SET", 10, 10).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkClusterSetString(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ ctx := context.Background()
+ cluster := newClusterScenario()
+ if err := startCluster(ctx, cluster); err != nil {
+ b.Fatal(err)
+ }
+ defer cluster.Close()
+
+ client := cluster.newClusterClient(ctx, redisClusterOptions())
+ defer client.Close()
+
+ value := string(bytes.Repeat([]byte{'1'}, 10000))
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ err := client.Set(ctx, "key", value, 0).Err()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+var clusterSink *redis.ClusterClient
+
+func BenchmarkClusterWithContext(b *testing.B) {
+ ctx := context.Background()
+ rdb := redis.NewClusterClient(&redis.ClusterOptions{})
+ defer rdb.Close()
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ clusterSink = rdb.WithContext(ctx)
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go
new file mode 100644
index 0000000..a54f2f3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go
@@ -0,0 +1,1750 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // NewClient creates a cluster node client with the provided options.
+ NewClient func(opt *Options) *Client
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 3 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful for manually creating a cluster of standalone Redis servers
+ // and load-balancing read/write operations between masters and slaves.
+ // It can use a service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ // PoolSize applies per cluster node and not for the whole cluster.
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 3
+ }
+
+ if opt.RouteByLatency || opt.RouteRandomly {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ const disableIdleCheck = -1
+
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: disableIdleCheck,
+
+ TLSConfig: opt.TLSConfig,
+ // If ClusterSlots is populated, then we probably have an artificial
+ // cluster whose nodes are not in clustering mode (otherwise there isn't
+ // much use for ClusterSlots config). This means we cannot execute the
+ // READONLY command against that node -- setting readOnly to false in such
+ // situations in the options below will prevent that from happening.
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
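+// updateLatency probes the node with PING several times and atomically stores the average round-trip time in microseconds.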
+func (n *clusterNode) updateLatency() {
+ const numProbe = 10
+ var dur uint64
+
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+ start := time.Now()
+ n.Client.Ping(context.TODO())
+ dur += uint64(time.Since(start) / time.Microsecond)
+ }
+
+ latency := float64(dur) / float64(numProbe)
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
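+// Failing reports whether the node was recently marked as failing; the mark expires after 15 seconds.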
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
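+// SetGeneration updates the node's generation, never decreasing it.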
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.nodes = nil
+ c.activeAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+
+ c.mu.RLock()
+ closed := c.closed //nolint:ifshort
+ if !closed {
+ if len(c.activeAddrs) > 0 {
+ addrs = c.activeAddrs
+ } else {
+ addrs = c.addrs
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+
+ c.mu.Lock()
+
+ c.activeAddrs = c.activeAddrs[:0]
+ for addr, node := range c.nodes {
+ if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency {
+ go node.updateLatency()
+ }
+ continue
+ }
+
+ delete(c.nodes, addr)
+ collected = append(collected, node)
+ }
+
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ node = newClusterNode(c.opt, addr)
+
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
+
+ return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+	// Replace the loopback host with the non-loopback origin host, keeping the node's port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Failing() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Failing() {
+ continue
+ }
+ if node == nil || n.Latency() < node.Latency() {
+ node = n
+ }
+ }
+ if node != nil {
+ return node, nil
+ }
+
+	// If all nodes are failing, return a random node.
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ if len(nodes) == 1 {
+ return nodes[0], nil
+ }
+ randomNodes := rand.Perm(len(nodes))
+ for _, idx := range randomNodes {
+ if node := nodes[idx]; !node.Failing() {
+ return node, nil
+ }
+ }
+ return nodes[randomNodes[0]], nil
+}
+
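+// slotNodes binary-searches the slot ranges (sorted by start slot) for the
+// range covering the given slot and returns its nodes, or nil if no range
+// covers it.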
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func(ctx context.Context) (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
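+// LazyReload starts an asynchronous reload of the cluster state. The reloading
+// flag ensures that at most one reload is in flight, and the trailing sleep
+// keeps back-to-back reload requests from hitting the cluster more often than
+// roughly once every 200ms.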
+func (c *clusterStateHolder) LazyReload() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload(context.Background())
+ if err != nil {
+ return
+ }
+ time.Sleep(200 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+ v := c.state.Load()
+ if v == nil {
+ return c.Reload(ctx)
+ }
+
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload()
+ }
+ return state, nil
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
+ if err == nil {
+ return state, nil
+ }
+ return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder //nolint:structcheck
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ *clusterClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ clusterClient: &clusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ },
+ ctx: context.Background(),
+ }
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ if opt.IdleCheckFrequency > 0 {
+ go c.reaper(opt.IdleCheckFrequency)
+ }
+
+ return c
+}
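+
+// Usage sketch (illustrative, not upstream code; the addresses are
+// placeholders for real cluster nodes):
+//
+//	ctx := context.Background()
+//	rdb := NewClusterClient(&ClusterOptions{
+//		Addrs: []string{":7000", ":7001", ":7002"},
+//	})
+//	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
+//		panic(err)
+//	}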
+
+func (c *ClusterClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads the cluster state. If the ClusterSlots option is set, it
+// is called to obtain the cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload()
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
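+
+// Usage sketch (illustrative, not upstream code; rdb, ctx and the key are
+// assumed placeholders): Do is handy for commands without a typed helper.
+//
+//	val, err := rdb.Do(ctx, "object", "encoding", "mykey").Result()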
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ slot := c.cmdSlot(cmd)
+
+ var node *clusterNode
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(ctx, cmdInfo, slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
+ _ = pipe.Close()
+ ask = false
+ } else {
+ lastErr = node.Client.Process(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ c.state.LazyReload()
+
+ var err error
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+			// First, retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+			// Second, try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
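+
+// Usage sketch (illustrative, not upstream code; rdb and ctx are assumed):
+// flush every master, as the tests in this package do.
+//
+//	err := rdb.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+//		return master.FlushDB(ctx).Err()
+//	})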
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get(context.TODO())
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots(ctx).Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ /*
+	 * No node is reachable. It is possible that every node's IP address has changed.
+	 * Clear activeAddrs so the client falls back to the initially configured
+	 * addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), whose host
+	 * names may now resolve to the updated IP addresses.
+ */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
+ return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+ ticker := time.NewTicker(idleCheckFrequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ break
+ }
+
+ for _, node := range nodes {
+ _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
+ }
+ }
+ }
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
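+
+// Usage sketch (illustrative, not upstream code; rdb and ctx are assumed):
+// queued commands are grouped by the node owning each key's slot and sent in
+// one round trip per node.
+//
+//	cmds, err := rdb.Pipelined(ctx, func(pipe Pipeliner) error {
+//		pipe.Set(ctx, "a", "1", 0)
+//		pipe.Get(ctx, "a")
+//		return nil
+//	})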
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+ err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) _processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly && isLoadingError(err) {
+ node.MarkAsFailing()
+ return err
+ }
+ if isRedisError(err) {
+ continue
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload()
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
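+
+// Usage sketch (illustrative, not upstream code; rdb and ctx are assumed):
+// like Pipelined, but the commands for each slot are wrapped in MULTI/EXEC.
+//
+//	_, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
+//		pipe.Incr(ctx, "counter")
+//		pipe.Expire(ctx, "counter", time.Hour)
+//		return nil
+//	})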
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) _processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
+ if err != nil {
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
+ }
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload()
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.Watch(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node, err = c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
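+
+// Usage sketch (illustrative, not upstream code; rdb, ctx and the key are
+// assumed): optimistic increment with WATCH, retried by the caller on
+// TxFailedErr. All watched keys must hash to the same slot.
+//
+//	err := rdb.Watch(ctx, func(tx *Tx) error {
+//		n, err := tx.Get(ctx, "counter").Int64()
+//		if err != nil && err != Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe Pipeliner) error {
+//			pipe.Set(ctx, "counter", n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, "counter")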
+
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(ctx, slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
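+
+// Usage sketch (illustrative, not upstream code; rdb, ctx and the channel name
+// are assumed):
+//
+//	pubsub := rdb.Subscribe(ctx, "mychannel")
+//	defer pubsub.Close()
+//	msg, err := pubsub.ReceiveMessage(ctx)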
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ info, err := node.Client.Command(ctx).Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ panic("not reached")
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+ if err != nil {
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdInfo *CommandInfo,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+// SlaveForKey returns a client for the replica node that serves the given key,
+// so that any command can be run against that replica. This is especially
+// useful for running a Lua script containing only read-only commands on a
+// replica: regular Redis commands carry a read-only flag and are routed to
+// replicas automatically when ClusterOptions.ReadOnly is set to true, whereas
+// a script has no such flag.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ slot := hashtag.Slot(key)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+// MasterForKey returns a client for the master node that serves the given key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+ slot := hashtag.Slot(key)
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
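+
+// Usage sketch (illustrative, not upstream code; rdb, ctx and the key are
+// assumed): pick the replica or the master that serves a given key.
+//
+//	replica, err := rdb.SlaveForKey(ctx, "user:42")
+//	master, err := rdb.MasterForKey(ctx, "user:42")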
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go
new file mode 100644
index 0000000..085bce8
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go
@@ -0,0 +1,109 @@
+package redis
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = size
+ }
+ return nil
+ })
+ return cmd
+}
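+
+// Usage sketch (illustrative, not upstream code; rdb and ctx are assumed):
+// a single DBSize call reports the summed key count of all masters.
+//
+//	total, err := rdb.DBSize(ctx).Result()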
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ mu := &sync.Mutex{}
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptLoad(ctx, script).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ if cmd.Val() == "" {
+ cmd.val = val
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ return shard.ScriptFlush(ctx).Err()
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+
+ result := make([]bool, len(hashes))
+ for i := range result {
+ result[i] = true
+ }
+
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ mu := &sync.Mutex{}
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptExists(ctx, hashes...).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ for i, v := range val {
+ result[i] = result[i] && v
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = result
+ }
+ return nil
+ })
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go
new file mode 100644
index 0000000..6ee7364
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go
@@ -0,0 +1,1283 @@
+package redis_test
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+)
+
+type clusterScenario struct {
+ ports []string
+ nodeIDs []string
+ processes map[string]*redisProcess
+ clients map[string]*redis.Client
+}
+
+func (s *clusterScenario) masters() []*redis.Client {
+ result := make([]*redis.Client, 3)
+ for pos, port := range s.ports[:3] {
+ result[pos] = s.clients[port]
+ }
+ return result
+}
+
+func (s *clusterScenario) slaves() []*redis.Client {
+ result := make([]*redis.Client, 3)
+ for pos, port := range s.ports[3:] {
+ result[pos] = s.clients[port]
+ }
+ return result
+}
+
+func (s *clusterScenario) addrs() []string {
+ addrs := make([]string, len(s.ports))
+ for i, port := range s.ports {
+ addrs[i] = net.JoinHostPort("127.0.0.1", port)
+ }
+ return addrs
+}
+
+func (s *clusterScenario) newClusterClientUnstable(opt *redis.ClusterOptions) *redis.ClusterClient {
+ opt.Addrs = s.addrs()
+ return redis.NewClusterClient(opt)
+}
+
+func (s *clusterScenario) newClusterClient(
+ ctx context.Context, opt *redis.ClusterOptions,
+) *redis.ClusterClient {
+ client := s.newClusterClientUnstable(opt)
+
+ err := eventually(func() error {
+ if opt.ClusterSlots != nil {
+ return nil
+ }
+
+ state, err := client.LoadState(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !state.IsConsistent(ctx) {
+ return fmt.Errorf("cluster state is not consistent")
+ }
+
+ return nil
+ }, 30*time.Second)
+ if err != nil {
+ panic(err)
+ }
+
+ return client
+}
+
+func (s *clusterScenario) Close() error {
+ for _, port := range s.ports {
+ processes[port].Close()
+ delete(processes, port)
+ }
+ return nil
+}
+
+func startCluster(ctx context.Context, scenario *clusterScenario) error {
+ // Start processes and collect node ids
+ for pos, port := range scenario.ports {
+ process, err := startRedis(port, "--cluster-enabled", "yes")
+ if err != nil {
+ return err
+ }
+
+ client := redis.NewClient(&redis.Options{
+ Addr: ":" + port,
+ })
+
+ info, err := client.ClusterNodes(ctx).Result()
+ if err != nil {
+ return err
+ }
+
+ scenario.processes[port] = process
+ scenario.clients[port] = client
+ scenario.nodeIDs[pos] = info[:40]
+ }
+
+ // Meet cluster nodes.
+ for _, client := range scenario.clients {
+ err := client.ClusterMeet(ctx, "127.0.0.1", scenario.ports[0]).Err()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Bootstrap masters.
+ slots := []int{0, 5000, 10000, 16384}
+ for pos, master := range scenario.masters() {
+ err := master.ClusterAddSlotsRange(ctx, slots[pos], slots[pos+1]-1).Err()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Bootstrap slaves.
+ for idx, slave := range scenario.slaves() {
+ masterID := scenario.nodeIDs[idx]
+
+ // Wait until master is available
+ err := eventually(func() error {
+ s := slave.ClusterNodes(ctx).Val()
+ wanted := masterID
+ if !strings.Contains(s, wanted) {
+ return fmt.Errorf("%q does not contain %q", s, wanted)
+ }
+ return nil
+ }, 10*time.Second)
+ if err != nil {
+ return err
+ }
+
+ err = slave.ClusterReplicate(ctx, masterID).Err()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Wait until all nodes have consistent info.
+ wanted := []redis.ClusterSlot{{
+ Start: 0,
+ End: 4999,
+ Nodes: []redis.ClusterNode{{
+ ID: "",
+ Addr: "127.0.0.1:8220",
+ }, {
+ ID: "",
+ Addr: "127.0.0.1:8223",
+ }},
+ }, {
+ Start: 5000,
+ End: 9999,
+ Nodes: []redis.ClusterNode{{
+ ID: "",
+ Addr: "127.0.0.1:8221",
+ }, {
+ ID: "",
+ Addr: "127.0.0.1:8224",
+ }},
+ }, {
+ Start: 10000,
+ End: 16383,
+ Nodes: []redis.ClusterNode{{
+ ID: "",
+ Addr: "127.0.0.1:8222",
+ }, {
+ ID: "",
+ Addr: "127.0.0.1:8225",
+ }},
+ }}
+ for _, client := range scenario.clients {
+ err := eventually(func() error {
+ res, err := client.ClusterSlots(ctx).Result()
+ if err != nil {
+ return err
+ }
+ return assertSlotsEqual(res, wanted)
+ }, 30*time.Second)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error {
+outerLoop:
+ for _, s2 := range wanted {
+ for _, s1 := range slots {
+ if slotEqual(s1, s2) {
+ continue outerLoop
+ }
+ }
+ return fmt.Errorf("%v not found in %v", s2, slots)
+ }
+ return nil
+}
+
+func slotEqual(s1, s2 redis.ClusterSlot) bool {
+ if s1.Start != s2.Start {
+ return false
+ }
+ if s1.End != s2.End {
+ return false
+ }
+ if len(s1.Nodes) != len(s2.Nodes) {
+ return false
+ }
+ for i, n1 := range s1.Nodes {
+ if n1.Addr != s2.Nodes[i].Addr {
+ return false
+ }
+ }
+ return true
+}
+
+//------------------------------------------------------------------------------
+
+var _ = Describe("ClusterClient", func() {
+ var failover bool
+ var opt *redis.ClusterOptions
+ var client *redis.ClusterClient
+
+ assertClusterClient := func() {
+ It("supports WithContext", func() {
+ ctx, cancel := context.WithCancel(ctx)
+ cancel()
+
+ err := client.Ping(ctx).Err()
+ Expect(err).To(MatchError("context canceled"))
+ })
+
+ It("should GET/SET/DEL", func() {
+ err := client.Get(ctx, "A").Err()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.Set(ctx, "A", "VALUE", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(func() string {
+ return client.Get(ctx, "A").Val()
+ }, 30*time.Second).Should(Equal("VALUE"))
+
+ cnt, err := client.Del(ctx, "A").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cnt).To(Equal(int64(1)))
+ })
+
+ It("GET follows redirects", func() {
+ err := client.Set(ctx, "A", "VALUE", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ if !failover {
+ Eventually(func() int64 {
+ nodes, err := client.Nodes(ctx, "A")
+ if err != nil {
+ return 0
+ }
+ return nodes[1].Client.DBSize(ctx).Val()
+ }, 30*time.Second).Should(Equal(int64(1)))
+
+ Eventually(func() error {
+ return client.SwapNodes(ctx, "A")
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ }
+
+ v, err := client.Get(ctx, "A").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("VALUE"))
+ })
+
+ It("SET follows redirects", func() {
+ if !failover {
+ Eventually(func() error {
+ return client.SwapNodes(ctx, "A")
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ }
+
+ err := client.Set(ctx, "A", "VALUE", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.Get(ctx, "A").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("VALUE"))
+ })
+
+ It("distributes keys", func() {
+ for i := 0; i < 100; i++ {
+ err := client.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ defer GinkgoRecover()
+ Eventually(func() string {
+ return master.Info(ctx, "keyspace").Val()
+ }, 30*time.Second).Should(Or(
+ ContainSubstring("keys=31"),
+ ContainSubstring("keys=29"),
+ ContainSubstring("keys=40"),
+ ))
+ return nil
+ })
+ })
+
+ It("distributes keys when using EVAL", func() {
+ script := redis.NewScript(`
+ local r = redis.call('SET', KEYS[1], ARGV[1])
+ return r
+ `)
+
+ var key string
+ for i := 0; i < 100; i++ {
+ key = fmt.Sprintf("key%d", i)
+ err := script.Run(ctx, client, []string{key}, "value").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ defer GinkgoRecover()
+ Eventually(func() string {
+ return master.Info(ctx, "keyspace").Val()
+ }, 30*time.Second).Should(Or(
+ ContainSubstring("keys=31"),
+ ContainSubstring("keys=29"),
+ ContainSubstring("keys=40"),
+ ))
+ return nil
+ })
+ })
+
+ It("distributes scripts when using Script Load", func() {
+ client.ScriptFlush(ctx)
+
+ script := redis.NewScript(`return 'Unique script'`)
+
+ script.Load(ctx, client)
+
+ client.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+ defer GinkgoRecover()
+
+ val, _ := script.Exists(ctx, shard).Result()
+ Expect(val[0]).To(Equal(true))
+ return nil
+ })
+ })
+
+ It("checks all shards when using Script Exists", func() {
+ client.ScriptFlush(ctx)
+
+ script := redis.NewScript(`return 'First script'`)
+ lostScriptSrc := `return 'Lost script'`
+ lostScript := redis.NewScript(lostScriptSrc)
+
+ script.Load(ctx, client)
+ client.Do(ctx, "script", "load", lostScriptSrc)
+
+ val, _ := client.ScriptExists(ctx, script.Hash(), lostScript.Hash()).Result()
+
+ Expect(val).To(Equal([]bool{true, false}))
+ })
+
+ It("flushes scripts from all shards when using ScriptFlush", func() {
+ script := redis.NewScript(`return 'Unnecessary script'`)
+ script.Load(ctx, client)
+
+ val, _ := client.ScriptExists(ctx, script.Hash()).Result()
+ Expect(val).To(Equal([]bool{true}))
+
+ client.ScriptFlush(ctx)
+
+ val, _ = client.ScriptExists(ctx, script.Hash()).Result()
+ Expect(val).To(Equal([]bool{false}))
+ })
+
+ It("supports Watch", func() {
+ var incr func(string) error
+
+ // Transactionally increments key using GET and SET commands.
+ incr = func(key string) error {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ n, err := tx.Get(ctx, key).Int64()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, key, strconv.FormatInt(n+1, 10), 0)
+ return nil
+ })
+ return err
+ }, key)
+ if err == redis.TxFailedErr {
+ return incr(key)
+ }
+ return err
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ err := incr("key")
+ Expect(err).NotTo(HaveOccurred())
+ }()
+ }
+ wg.Wait()
+
+ Eventually(func() string {
+ return client.Get(ctx, "key").Val()
+ }, 30*time.Second).Should(Equal("100"))
+ })
+
+ Describe("pipelining", func() {
+ var pipe *redis.Pipeline
+
+ assertPipeline := func() {
+ keys := []string{"A", "B", "C", "D", "E", "F", "G"}
+
+ It("follows redirects", func() {
+ if !failover {
+ for _, key := range keys {
+ Eventually(func() error {
+ return client.SwapNodes(ctx, key)
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ }
+ }
+
+ for i, key := range keys {
+ pipe.Set(ctx, key, key+"_value", 0)
+ pipe.Expire(ctx, key, time.Duration(i+1)*time.Hour)
+ }
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(14))
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ defer GinkgoRecover()
+ Eventually(func() int64 {
+ return node.DBSize(ctx).Val()
+ }, 30*time.Second).ShouldNot(BeZero())
+ return nil
+ })
+
+ if !failover {
+ for _, key := range keys {
+ Eventually(func() error {
+ return client.SwapNodes(ctx, key)
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ }
+ }
+
+ for _, key := range keys {
+ pipe.Get(ctx, key)
+ pipe.TTL(ctx, key)
+ }
+ cmds, err = pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(14))
+
+ for i, key := range keys {
+ get := cmds[i*2].(*redis.StringCmd)
+ Expect(get.Val()).To(Equal(key + "_value"))
+
+ ttl := cmds[(i*2)+1].(*redis.DurationCmd)
+ dur := time.Duration(i+1) * time.Hour
+ Expect(ttl.Val()).To(BeNumerically("~", dur, 30*time.Second))
+ }
+ })
+
+ It("works with missing keys", func() {
+ pipe.Set(ctx, "A", "A_value", 0)
+ pipe.Set(ctx, "C", "C_value", 0)
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ a := pipe.Get(ctx, "A")
+ b := pipe.Get(ctx, "B")
+ c := pipe.Get(ctx, "C")
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).To(Equal(redis.Nil))
+ Expect(cmds).To(HaveLen(3))
+
+ Expect(a.Err()).NotTo(HaveOccurred())
+ Expect(a.Val()).To(Equal("A_value"))
+
+ Expect(b.Err()).To(Equal(redis.Nil))
+ Expect(b.Val()).To(Equal(""))
+
+ Expect(c.Err()).NotTo(HaveOccurred())
+ Expect(c.Val()).To(Equal("C_value"))
+ })
+ }
+
+ Describe("with Pipeline", func() {
+ BeforeEach(func() {
+ pipe = client.Pipeline().(*redis.Pipeline)
+ })
+
+ AfterEach(func() {
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+ })
+
+ assertPipeline()
+ })
+
+ Describe("with TxPipeline", func() {
+ BeforeEach(func() {
+ pipe = client.TxPipeline().(*redis.Pipeline)
+ })
+
+ AfterEach(func() {
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+ })
+
+ assertPipeline()
+ })
+ })
+
+ It("supports PubSub", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ Eventually(func() error {
+ _, err := client.Publish(ctx, "mychannel", "hello").Result()
+ if err != nil {
+ return err
+ }
+
+ msg, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ if err != nil {
+ return err
+ }
+
+ _, ok := msg.(*redis.Message)
+ if !ok {
+ return fmt.Errorf("got %T, wanted *redis.Message", msg)
+ }
+
+ return nil
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ })
+
+ It("supports PubSub.Ping without channels", func() {
+ pubsub := client.Subscribe(ctx)
+ defer pubsub.Close()
+
+ err := pubsub.Ping(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ })
+ }
+
+ Describe("ClusterClient", func() {
+ BeforeEach(func() {
+ opt = redisClusterOptions()
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("returns pool stats", func() {
+ stats := client.PoolStats()
+ Expect(stats).To(BeAssignableToTypeOf(&redis.PoolStats{}))
+ })
+
+ It("returns an error when there are no attempts left", func() {
+ opt := redisClusterOptions()
+ opt.MaxRedirects = -1
+ client := cluster.newClusterClient(ctx, opt)
+
+ Eventually(func() error {
+ return client.SwapNodes(ctx, "A")
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+
+ err := client.Get(ctx, "A").Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("MOVED"))
+
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("calls fn for every master node", func() {
+ for i := 0; i < 10; i++ {
+ Expect(client.Set(ctx, strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred())
+ }
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ size, err := client.DBSize(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(0)))
+ })
+
+ It("should CLUSTER SLOTS", func() {
+ res, err := client.ClusterSlots(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(3))
+
+ wanted := []redis.ClusterSlot{{
+ Start: 0,
+ End: 4999,
+ Nodes: []redis.ClusterNode{{
+ ID: "",
+ Addr: "127.0.0.1:8220",
+ }, {
+ ID: "",
+ Addr: "127.0.0.1:8223",
+ }},
+ }, {
+ Start: 5000,
+ End: 9999,
+ Nodes: []redis.ClusterNode{{
+ ID: "",
+ Addr: "127.0.0.1:8221",
+ }, {
+ ID: "",
+ Addr: "127.0.0.1:8224",
+ }},
+ }, {
+ Start: 10000,
+ End: 16383,
+ Nodes: []redis.ClusterNode{{
+ ID: "",
+ Addr: "127.0.0.1:8222",
+ }, {
+ ID: "",
+ Addr: "127.0.0.1:8225",
+ }},
+ }}
+ Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred())
+ })
+
+ It("should CLUSTER NODES", func() {
+ res, err := client.ClusterNodes(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(res)).To(BeNumerically(">", 400))
+ })
+
+ It("should CLUSTER INFO", func() {
+ res, err := client.ClusterInfo(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainSubstring("cluster_known_nodes:6"))
+ })
+
+ It("should CLUSTER KEYSLOT", func() {
+ hashSlot, err := client.ClusterKeySlot(ctx, "somekey").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey"))))
+ })
+
+ It("should CLUSTER GETKEYSINSLOT", func() {
+ keys, err := client.ClusterGetKeysInSlot(ctx, hashtag.Slot("somekey"), 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(keys)).To(Equal(0))
+ })
+
+ It("should CLUSTER COUNT-FAILURE-REPORTS", func() {
+ n, err := client.ClusterCountFailureReports(ctx, cluster.nodeIDs[0]).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+ })
+
+ It("should CLUSTER COUNTKEYSINSLOT", func() {
+ n, err := client.ClusterCountKeysInSlot(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+ })
+
+ It("should CLUSTER SAVECONFIG", func() {
+ res, err := client.ClusterSaveConfig(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+ })
+
+ It("should CLUSTER SLAVES", func() {
+ nodesList, err := client.ClusterSlaves(ctx, cluster.nodeIDs[0]).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nodesList).Should(ContainElement(ContainSubstring("slave")))
+ Expect(nodesList).Should(HaveLen(1))
+ })
+
+ It("should RANDOMKEY", func() {
+ const nkeys = 100
+
+ for i := 0; i < nkeys; i++ {
+ err := client.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ var keys []string
+ addKey := func(key string) {
+ for _, k := range keys {
+ if k == key {
+ return
+ }
+ }
+ keys = append(keys, key)
+ }
+
+ for i := 0; i < nkeys*10; i++ {
+ key := client.RandomKey(ctx).Val()
+ addKey(key)
+ }
+
+ Expect(len(keys)).To(BeNumerically("~", nkeys, nkeys/10))
+ })
+
+ It("supports Process hook", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ return node.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ var stack []string
+
+ clusterHook := &hook{
+ beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcess")
+ return ctx, nil
+ },
+ afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcess")
+ return nil
+ },
+ }
+ client.AddHook(clusterHook)
+
+ nodeHook := &hook{
+ beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcess")
+ return ctx, nil
+ },
+ afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcess")
+ return nil
+ },
+ }
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ node.AddHook(nodeHook)
+ return nil
+ })
+
+ err = client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(stack).To(Equal([]string{
+ "cluster.BeforeProcess",
+ "shard.BeforeProcess",
+ "shard.AfterProcess",
+ "cluster.AfterProcess",
+ }))
+
+ clusterHook.beforeProcess = nil
+ clusterHook.afterProcess = nil
+ nodeHook.beforeProcess = nil
+ nodeHook.afterProcess = nil
+ })
+
+ It("supports Pipeline hook", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ return node.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ var stack []string
+
+ client.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcessPipeline")
+ return nil
+ },
+ })
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ node.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+ return nil
+ },
+ })
+ return nil
+ })
+
+ _, err = client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(stack).To(Equal([]string{
+ "cluster.BeforeProcessPipeline",
+ "shard.BeforeProcessPipeline",
+ "shard.AfterProcessPipeline",
+ "cluster.AfterProcessPipeline",
+ }))
+ })
+
+ It("supports TxPipeline hook", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ return node.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ var stack []string
+
+ client.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcessPipeline")
+ return nil
+ },
+ })
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
+ node.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+ return nil
+ },
+ })
+ return nil
+ })
+
+ _, err = client.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(stack).To(Equal([]string{
+ "cluster.BeforeProcessPipeline",
+ "shard.BeforeProcessPipeline",
+ "shard.AfterProcessPipeline",
+ "cluster.AfterProcessPipeline",
+ }))
+ })
+
+ It("should return correct replica for key", func() {
+ client, err := client.SlaveForKey(ctx, "test")
+ Expect(err).ToNot(HaveOccurred())
+ info := client.Info(ctx, "server")
+ Expect(info.Val()).Should(ContainSubstring("tcp_port:8224"))
+ })
+
+ It("should return correct master for key", func() {
+ client, err := client.MasterForKey(ctx, "test")
+ Expect(err).ToNot(HaveOccurred())
+ info := client.Info(ctx, "server")
+ Expect(info.Val()).Should(ContainSubstring("tcp_port:8221"))
+ })
+
+ assertClusterClient()
+ })
+
+ Describe("ClusterClient with RouteByLatency", func() {
+ BeforeEach(func() {
+ opt = redisClusterOptions()
+ opt.RouteByLatency = true
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
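+			// Wait until the flushed keyspace is reported empty before running the specs.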
+ Eventually(func() int64 {
+ return client.DBSize(ctx).Val()
+ }, 30*time.Second).Should(Equal(int64(0)))
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
+ return slave.ReadWrite(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ assertClusterClient()
+ })
+
+ Describe("ClusterClient with ClusterSlots", func() {
+ BeforeEach(func() {
+ failover = true
+
+ opt = redisClusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) {
+ slots := []redis.ClusterSlot{{
+ Start: 0,
+ End: 4999,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":" + ringShard1Port,
+ }},
+ }, {
+ Start: 5000,
+ End: 9999,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":" + ringShard2Port,
+ }},
+ }, {
+ Start: 10000,
+ End: 16383,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":" + ringShard3Port,
+ }},
+ }}
+ return slots, nil
+ }
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
+ Eventually(func() int64 {
+ return client.DBSize(ctx).Val()
+ }, 30*time.Second).Should(Equal(int64(0)))
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ failover = false
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ assertClusterClient()
+ })
+
+ Describe("ClusterClient with RouteRandomly and ClusterSlots", func() {
+ BeforeEach(func() {
+ failover = true
+
+ opt = redisClusterOptions()
+ opt.RouteRandomly = true
+ opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) {
+ slots := []redis.ClusterSlot{{
+ Start: 0,
+ End: 4999,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":" + ringShard1Port,
+ }},
+ }, {
+ Start: 5000,
+ End: 9999,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":" + ringShard2Port,
+ }},
+ }, {
+ Start: 10000,
+ End: 16383,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":" + ringShard3Port,
+ }},
+ }}
+ return slots, nil
+ }
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
+ Eventually(func() int64 {
+ return client.DBSize(ctx).Val()
+ }, 30*time.Second).Should(Equal(int64(0)))
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ failover = false
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ assertClusterClient()
+ })
+
+ Describe("ClusterClient with ClusterSlots with multiple nodes per slot", func() {
+ BeforeEach(func() {
+ failover = true
+
+ opt = redisClusterOptions()
+ opt.ReadOnly = true
+ opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) {
+ slots := []redis.ClusterSlot{{
+ Start: 0,
+ End: 4999,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":8220",
+ }, {
+ Addr: ":8223",
+ }},
+ }, {
+ Start: 5000,
+ End: 9999,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":8221",
+ }, {
+ Addr: ":8224",
+ }},
+ }, {
+ Start: 10000,
+ End: 16383,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":8222",
+ }, {
+ Addr: ":8225",
+ }},
+ }}
+ return slots, nil
+ }
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
+ Eventually(func() int64 {
+ return client.DBSize(ctx).Val()
+ }, 30*time.Second).Should(Equal(int64(0)))
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ failover = false
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ assertClusterClient()
+ })
+})
+
+var _ = Describe("ClusterClient without nodes", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ client = redis.NewClusterClient(&redis.ClusterOptions{})
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("Ping returns an error", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).To(MatchError("redis: cluster has no nodes"))
+ })
+
+ It("pipeline returns an error", func() {
+ _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).To(MatchError("redis: cluster has no nodes"))
+ })
+})
+
+var _ = Describe("ClusterClient without valid nodes", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: []string{redisAddr},
+ })
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("returns an error", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+ })
+
+ It("pipeline returns an error", func() {
+ _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+ })
+})
+
+var _ = Describe("ClusterClient with unavailable Cluster", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ for _, node := range cluster.clients {
+ err := node.ClientPause(ctx, 5*time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ opt := redisClusterOptions()
+ opt.ReadTimeout = 250 * time.Millisecond
+ opt.WriteTimeout = 250 * time.Millisecond
+ opt.MaxRedirects = 1
+ client = cluster.newClusterClientUnstable(opt)
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("recovers when Cluster recovers", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+
+ Eventually(func() error {
+ return client.Ping(ctx).Err()
+ }, "30s").ShouldNot(HaveOccurred())
+ })
+})
+
+var _ = Describe("ClusterClient timeout", func() {
+ var client *redis.ClusterClient
+
+ AfterEach(func() {
+ _ = client.Close()
+ })
+
+ testTimeout := func() {
+		It("Ping times out", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+		It("Pipeline times out", func() {
+ _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+		It("Tx times out", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ return tx.Ping(ctx).Err()
+ }, "foo")
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+		It("Tx Pipeline times out", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ return err
+ }, "foo")
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+ }
+
+ const pause = 5 * time.Second
+
+ Context("read/write timeout", func() {
+ BeforeEach(func() {
+ opt := redisClusterOptions()
+ opt.ReadTimeout = 250 * time.Millisecond
+ opt.WriteTimeout = 250 * time.Millisecond
+ opt.MaxRedirects = 1
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
+ return client.ClientPause(ctx, pause).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
+ defer GinkgoRecover()
+ Eventually(func() error {
+ return client.Ping(ctx).Err()
+ }, 2*pause).ShouldNot(HaveOccurred())
+ return nil
+ })
+ })
+
+ testTimeout()
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go
new file mode 100644
index 0000000..4bb12a8
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go
@@ -0,0 +1,3478 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hscan"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+type Cmder interface {
+ Name() string
+ FullName() string
+ Args() []interface{}
+ String() string
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
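+// cmdFirstKeyPos returns the index of the command's first key within its
+// argument slice (the command name sits at index 0), or 0 when the command has
+// no key. For example, "EVAL script 2 k1 k2" has its first key at index 3,
+// while "EVAL script 0" operates on no keys.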
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+
+ if info != nil {
+ return int(info.FirstKeyPos)
+ }
+ return 0
+}
+
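+// cmdString renders a command as its space-separated arguments, appending
+// ": <error>" when the command failed or ": <value>" when a reply value is set.
+// A completed PING therefore renders as "ping: PONG", which is the exact string
+// the hook tests in cluster_test.go assert on.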
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return internal.String(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply(sliceParser)
+ return err
+}
+
+// sliceParser implements proto.MultiBulkParse.
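+// It parses nested arrays recursively: a Nil reply becomes a nil element and a
+// per-element RedisError is stored in place, so one failed element does not
+// abort the whole slice.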
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, n)
+ for i := 0; i < len(vals); i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err != nil {
+ if err == Nil {
+ vals[i] = nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ vals[i] = err
+ continue
+ }
+ return nil, err
+ }
+ vals[i] = v
+ }
+ return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the reply values into a destination struct. Each value is matched
+// to a struct field whose `redis:"field"` tag equals the corresponding key or
+// field argument of the command.
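+//
+// Illustrative usage only, assuming a hypothetical client value rdb created
+// elsewhere:
+//
+//	type Person struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	var p Person
+//	err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&p)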
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+	// Pair the reply values with the command's field/key arguments.
+	// For HMGET, skip the first two args (command name and hash key).
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+		// Otherwise it's MGET key key ...; skip only the command name.
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(sliceParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadIntReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ num, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = num
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
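+	// The sentinel is kept as a raw -1 or -2 (in nanoseconds) rather than being
+	// scaled by the precision.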
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val = time.Unix(sec, microsec*1000)
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(nil)
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
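+	// Either way the caller sees false: a nil reply (SET ... NX) and a 0 reply
+	// (SETNX) both mean the key was not set.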
+ if err == Nil {
+ cmd.val = false
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ switch v := v.(type) {
+ case int64:
+ cmd.val = v == 1
+ return nil
+ case string:
+ cmd.val = v == "OK"
+ return nil
+ default:
+ return fmt.Errorf("got %T, wanted int64 or string", v)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloatReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloatReply(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return nil, err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = n == 1
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to struct fields via their `redis:"field"` tags.
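+//
+// Illustrative usage only, assuming a hypothetical client value rdb and a
+// struct with `redis` tags such as the Person example shown for SliceCmd.Scan:
+//
+//	var p Person
+//	err := rdb.HGetAll(ctx, "user:1").Scan(&p)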
+func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
+ return &StringIntMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = n
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+ var err error
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < n; i++ {
+ var err error
+ msgs[i], err = readXMessage(rd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return XMessage{}, err
+ }
+ if n != 2 {
+ return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ var values map[string]interface{}
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ } else {
+ values = v.(map[string]interface{})
+ }
+
+ return XMessage{
+ ID: id,
+ Values: values,
+ }, nil
+}
+
+// stringInterfaceMapParser implements proto.MultiBulkParse.
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]interface{}, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ stream, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, err := readXMessageSlice(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = XStream{
+ Stream: stream,
+ Messages: msgs,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ count, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ lower, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ higher, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = &XPending{
+ Count: count,
+ Lower: lower,
+ Higher: higher,
+ }
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.val.Consumers == nil {
+ cmd.val.Consumers = make(map[string]int64)
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XPendingExt, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ idle, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ retryCount, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = append(cmd.val, XPendingExt{
+ ID: id,
+ Consumer: consumer,
+ Idle: time.Duration(idle) * time.Millisecond,
+ RetryCount: retryCount,
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+ var err error
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+ var err error
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle int64
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = readXConsumerInfo(rd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
+ var consumer XInfoConsumer
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return consumer, err
+ }
+ if n != 6 {
+ return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
+ }
+
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return consumer, err
+ }
+
+ val, err := rd.ReadString()
+ if err != nil {
+ return consumer, err
+ }
+
+ switch key {
+ case "name":
+ consumer.Name = val
+ case "pending":
+ consumer.Pending, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return consumer, err
+ }
+ case "idle":
+ consumer.Idle, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return consumer, err
+ }
+ default:
+ return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ }
+
+ return consumer, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = readXGroupInfo(rd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
+ var group XInfoGroup
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return group, err
+ }
+ if n != 8 {
+ return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
+ }
+
+ for i := 0; i < 4; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return group, err
+ }
+
+ val, err := rd.ReadString()
+ if err != nil {
+ return group, err
+ }
+
+ switch key {
+ case "name":
+ group.Name = val
+ case "consumers":
+ group.Consumers, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
+ case "pending":
+ group.Pending, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID = val
+ default:
+ return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
+ }
+ }
+
+ return group, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ FirstEntry XMessage
+ LastEntry XMessage
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(xStreamInfoParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.(*XInfoStream)
+ return nil
+}
+
+func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 14 {
+		return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+			"wanted 14", n)
+ }
+ var info XInfoStream
+ for i := 0; i < 7; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ switch key {
+ case "length":
+ info.Length, err = rd.ReadIntReply()
+ case "radix-tree-keys":
+ info.RadixTreeKeys, err = rd.ReadIntReply()
+ case "radix-tree-nodes":
+ info.RadixTreeNodes, err = rd.ReadIntReply()
+ case "groups":
+ info.Groups, err = rd.ReadIntReply()
+ case "last-generated-id":
+ info.LastGeneratedID, err = rd.ReadString()
+ case "first-entry":
+ info.FirstEntry, err = readXMessage(rd)
+ if err == Nil {
+ err = nil
+ }
+ case "last-entry":
+ info.LastEntry, err = readXMessage(rd)
+ if err == Nil {
+ err = nil
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &info, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n != 12 {
+		return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+			"wanted 12", n)
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < 6; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadIntReply()
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ default:
+ return fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 10 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 10", nn)
+ }
+
+ group := XInfoStreamGroup{}
+
+ for f := 0; f < 5; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ case "pel-count":
+ group.PelCount, err = rd.ReadIntReply()
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 4 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 4", nn)
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
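+		// The delivery time is reported in milliseconds since the Unix epoch.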
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 8 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 8", nn)
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < 4; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
+ case "pel-count":
+ c.PelCount, err = rd.ReadIntReply()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 3 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+ "wanted 3", nn)
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]Z, n/2)
+ for i := 0; i < len(cmd.val); i++ {
+ member, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ score, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = Z{
+ Member: member,
+ Score: score,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 3 {
+ return nil, fmt.Errorf("got %d elements, expected 3", n)
+ }
+
+ cmd.val = &ZWithKey{}
+ var err error
+
+ cmd.val.Key, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.page, cmd.cursor, err = rd.ReadScanReply()
+ return err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
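+
+// Illustrative sketch (not part of the upstream file): a typical way a caller
+// might consume a ScanCmd through its iterator. Assumes an existing *Client
+// named rdb and a context.Context named ctx.
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		fmt.Println("key:", iter.Val())
+//	}
+//	if err := iter.Err(); err != nil {
+//		panic(err)
+//	}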
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]ClusterSlot, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if n == 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].ID = id
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add a geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query a geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+}
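+
+// Illustrative sketch (not part of the upstream file): one way a caller might
+// build a GeoRadiusQuery for GeoRadius. Assumes an existing *Client named rdb,
+// a context ctx, and a key "drivers" populated via GeoAdd.
+//
+//	locs, err := rdb.GeoRadius(ctx, "drivers", 13.361389, 38.115556, &redis.GeoRadiusQuery{
+//		Radius:    200,
+//		Unit:      "km",
+//		WithCoord: true,
+//		WithDist:  true,
+//		Count:     10,
+//		Sort:      "ASC",
+//	}).Result()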
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if err != nil {
+ return err
+ }
+ cmd.locations = v.([]GeoLocation)
+ return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case string:
+ locs = append(locs, GeoLocation{
+ Name: vv,
+ })
+ case *GeoLocation:
+ // TODO: avoid copying
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used with the GeoSearch and GeoSearchStore commands.
+type GeoSearchQuery struct {
+ Member string
+
+ // Longitude and Latitude when using the FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Radius and unit when using the ByRadius option.
+ // Can be m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Width, height and unit when using the ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
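+
+// Illustrative sketch (not part of the upstream file): a GeoSearchQuery used
+// with GeoSearch, searching a box around an existing member. Assumes rdb and
+// ctx exist and the key "drivers" holds geo data.
+//
+//	names, err := rdb.GeoSearch(ctx, "drivers", &redis.GeoSearchQuery{
+//		Member:    "driver:42",
+//		BoxWidth:  10,
+//		BoxHeight: 10,
+//		BoxUnit:   "km",
+//		Sort:      "ASC",
+//	}).Result()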
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn != 2 {
+ return fmt.Errorf("got %d coordinates, expected 2", nn)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]*GeoPos, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ longitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ latitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ cmd.val[vv.Name] = vv
+ }
+ return nil, nil
+ })
+ return err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+
+ switch n {
+ case numArgRedis5, numArgRedis6:
+ // continue
+ default:
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6 or 7", n)
+ }
+
+ var cmd CommandInfo
+ var err error
+
+ cmd.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.Flags = make([]string, n)
+ for i := 0; i < len(cmd.Flags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.Flags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.Flags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ if n == numArgRedis5 {
+ return &cmd, nil
+ }
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.ACLFlags = make([]string, n)
+ for i := 0; i < len(cmd.ACLFlags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.ACLFlags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.ACLFlags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These optional fields are emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]SlowLog, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 4 {
+ err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
+ return nil, err
+ }
+
+ id, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ createdAt, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ createdAtTime := time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ costsDuration := time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if cmdLen < 1 {
+ err := fmt.Errorf("redis: got %d elements in command args in slowlog get reply, expected at least 1", cmdLen)
+ return nil, err
+ }
+
+ cmdString := make([]string, cmdLen)
+ for i := 0; i < cmdLen; i++ {
+ cmdString[i], err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var address, name string
+ for i := 4; i < n; i++ {
+ str, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if i == 4 {
+ address = str
+ } else if i == 5 {
+ name = str
+ }
+ }
+
+ cmd.val[i] = SlowLog{
+ ID: id,
+ Time: createdAtTime,
+ Duration: costsDuration,
+ Args: cmdString,
+ ClientAddr: address,
+ ClientName: name,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go
new file mode 100644
index 0000000..168f9f6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go
@@ -0,0 +1,96 @@
+package redis_test
+
+import (
+ "errors"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ redis "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("Cmd", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("implements Stringer", func() {
+ set := client.Set(ctx, "foo", "bar", 0)
+ Expect(set.String()).To(Equal("set foo bar: OK"))
+
+ get := client.Get(ctx, "foo")
+ Expect(get.String()).To(Equal("get foo: bar"))
+ })
+
+ It("has val/err", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+ })
+
+ It("has helpers", func() {
+ set := client.Set(ctx, "key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+
+ n, err := client.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(10)))
+
+ un, err := client.Get(ctx, "key").Uint64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(un).To(Equal(uint64(10)))
+
+ f, err := client.Get(ctx, "key").Float64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(f).To(Equal(float64(10)))
+ })
+
+ It("supports float32", func() {
+ f := float32(66.97)
+
+ err := client.Set(ctx, "float_key", f, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "float_key").Float32()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(f))
+ })
+
+ It("supports time.Time", func() {
+ tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
+
+ err := client.Set(ctx, "time_key", tm, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ s, err := client.Get(ctx, "time_key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(Equal("2019-01-01T09:45:10.000222125Z"))
+
+ tm2, err := client.Get(ctx, "time_key").Time()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(tm2).To(BeTemporally("==", tm))
+ })
+
+ It("allows to set custom error", func() {
+ e := errors.New("custom error")
+ cmd := redis.Cmd{}
+ cmd.SetErr(e)
+ _, err := cmd.Result()
+ Expect(err).To(Equal(e))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go
new file mode 100644
index 0000000..bbfe089
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go
@@ -0,0 +1,3475 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
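+
+// Illustrative note (not part of the upstream file): formatMs and formatSec
+// round sub-resolution durations up to the smallest value the server accepts,
+// so callers passing a too-short duration still send 1, e.g.:
+//
+//	formatMs(ctx, 500*time.Microsecond)  // 1 (a truncation warning is logged)
+//	formatSec(ctx, 900*time.Millisecond) // 1 (a truncation warning is logged)
+//	formatMs(ctx, 2*time.Second)         // 2000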
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ default:
+ return append(dst, arg)
+ }
+}
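+
+// Illustrative note (not part of the upstream file): appendArgs is what lets
+// variadic commands such as MSet accept a flat pair list, a []string, or a
+// map as their single argument, e.g.:
+//
+//	appendArgs([]interface{}{"mset"}, []interface{}{map[string]interface{}{"k1": "v1", "k2": "v2"}})
+//	// -> ["mset", "k1", "v1", "k2", "v2"] (pair order follows map iteration order)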
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ // TODO: rename to SetEx
+ SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+ Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
+
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *StringStringMapCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+ HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
+
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+ BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
+
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+
+ // TODO: XTrim and XTrimApprox remove in v9.
+ XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+
+ // TODO: remove
+ // ZAddCh
+ // ZIncr
+ // ZAddNXCh
+ // ZAddXXCh
+ // ZIncrNX
+ // ZIncrXX
+ // in v9.
+ // use ZAddArgs and ZAddArgsIncr.
+
+ ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+ ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *SliceCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+ GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+ GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users defined in the ACL list
+// when connecting to a Redis 6.0 or greater instance that uses the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+ ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "expire"
+ args[1] = key
+ args[2] = formatSec(ctx, expiration)
+ if mode != "" {
+ args = append(args, mode)
+ }
+
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
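+
+// Illustrative sketch (not part of the upstream file): the mode variants map
+// onto the EXPIRE NX/XX/GT/LT options, which only newer Redis servers accept.
+// Assumes rdb and ctx exist.
+//
+//	ok, err := rdb.ExpireNX(ctx, "session:1", 10*time.Minute).Result()
+//	// ok is true only if the key previously had no expiration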
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
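+
+// Illustrative sketch (not part of the upstream file): a Sort query as it
+// might be passed to Sort or SortStore. Assumes rdb and ctx exist and
+// "mylist" holds numeric members.
+//
+//	vals, err := rdb.Sort(ctx, "mylist", &redis.Sort{
+//		Offset: 0,
+//		Count:  10,
+//		Order:  "DESC",
+//	}).Result()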
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args(key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
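+
+// Illustrative sketch (not part of the upstream file): distinguishing a
+// missing key from a real error when calling Get. Assumes rdb and ctx exist.
+//
+//	val, err := rdb.Get(ctx, "missing-key").Result()
+//	switch {
+//	case err == redis.Nil:
+//		fmt.Println("key does not exist")
+//	case err != nil:
+//		panic(err)
+//	default:
+//		fmt.Println("value:", val)
+//	}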
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetEx gets the value of key and optionally sets its expiration. An expiration of zero removes
+// the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetDel requires redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEX`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
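+
+// Usage sketch (editorial note, assuming a client named `rdb`): how the expiration argument
+// selects the SET options described above:
+//
+//	rdb.Set(ctx, "key", "value", 0)              // SET key value
+//	rdb.Set(ctx, "key", "value", 10*time.Second) // SET key value ex 10
+//	rdb.Set(ctx, "key", "value", KeepTTL)        // SET key value keepttl (Redis >= 6.0)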
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+ // Mode can be `NX` or `XX` or empty.
+ Mode string
+
+ // Zero `TTL` or `Expiration` means that the key has no expiration time.
+ TTL time.Duration
+ ExpireAt time.Time
+
+ // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+ Get bool
+
+ // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+ // otherwise you will receive an error: (error) ERR syntax error.
+ KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
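+
+// Usage sketch (editorial note, assuming a client named `rdb`): SetArgs with NX, a TTL and Get,
+// i.e. "SET key value ex 10 NX get":
+//
+//	old, err := rdb.SetArgs(ctx, "key", "value", SetArgs{
+//		Mode: "NX",
+//		TTL:  10 * time.Second,
+//		Get:  true,
+//	}).Result()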
+
+// SetEX Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+ args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+ if replace {
+ args = append(args, "REPLACE")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
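+
+// Usage sketch (editorial note, assuming a client named `rdb`): counting the set bits in
+// bytes 0..10 of a key, i.e. "BITCOUNT mykey 0 10":
+//
+//	n, err := rdb.BitCount(ctx, "mykey", &BitCount{Start: 0, End: 10}).Result()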
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
+ a := make([]interface{}, 0, 2+len(args))
+ a = append(a, "bitfield")
+ a = append(a, key)
+ a = append(a, args...)
+ cmd := NewIntSliceCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
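+
+// Usage sketch (editorial note, assuming a client named `rdb`): iterating all keys matching a
+// pattern by driving the cursor returned by Scan until it comes back as 0:
+//
+//	var cursor uint64
+//	for {
+//		var keys []string
+//		var err error
+//		keys, cursor, err = rdb.Scan(ctx, cursor, "user:*", 100).Result()
+//		if err != nil {
+//			panic(err)
+//		}
+//		// process keys ...
+//		if cursor == 0 {
+//			break
+//		}
+//	}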
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ if keyType != "" {
+ args = append(args, "type", keyType)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} to distinguish between empty string and nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HSet accepts values in the following formats:
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that Redis v4 or later is required to set multiple field/value pairs.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandField requires redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
+ args := make([]interface{}, 0, 4)
+
+ // Although count=0 is meaningless, redis accepts count=0.
+ args = append(args, "hrandfield", key, count)
+ if withValues {
+ args = append(args, "withvalues")
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
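+
+// Usage sketch (editorial note, assuming a client named `rdb`): BLPop blocks up to the given
+// timeout and returns the Nil error when it expires:
+//
+//	vals, err := rdb.BLPop(ctx, 5*time.Second, "queue").Result()
+//	if err == Nil {
+//		// timed out, no element available
+//	}
+//	// vals[0] is the key ("queue"), vals[1] is the popped value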
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMove(
+ ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+ cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+
+ // Deprecated: use MaxLen+Approx, remove in v9.
+ MaxLenApprox int64 // MAXLEN ~ N
+
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
+}
+
+// XAdd: the Limit option is affected by a known redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MaxLenApprox > 0:
+ // TODO remove in v9.
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
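+
+// Usage sketch (editorial note, assuming a client named `rdb`): appending an entry to a stream
+// with an approximate MAXLEN trim, i.e. "XADD mystream maxlen ~ 1000 * key1 value1":
+//
+//	id, err := rdb.XAdd(ctx, &XAddArgs{
+//		Stream: "mystream",
+//		MaxLen: 1000,
+//		Approx: true,
+//		Values: map[string]interface{}{"key1": "value1"},
+//	}).Result()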
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 6+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
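+
+// Usage sketch (editorial note, assuming a client named `rdb`): XRead expects the stream names
+// first and then their last-seen IDs, and Block controls the read timeout (Block: 0 blocks
+// indefinitely, a negative value disables BLOCK):
+//
+//	streams, err := rdb.XRead(ctx, &XReadArgs{
+//		Streams: []string{"stream1", "stream2", "0", "0"},
+//		Count:   10,
+//		Block:   time.Second,
+//	}).Result()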
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 10+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(4)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Idle time.Duration
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+// xTrim: if approx is true, the "~" matcher is added; otherwise the default "=" matcher is used (the redis default).
+// Examples:
+// XTRIM key MAXLEN/MINID threshold LIMIT limit
+// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit
+// If the redis-server version is lower than 6.2, set limit to 0.
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Deprecated: use XTrimMaxLen, remove in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox, remove in v9.
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen uses no `~` matcher, so `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox: LIMIT is affected by a known redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
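+
+// Usage sketch (editorial note, assuming a client named `rdb`):
+//
+//	// XTRIM mystream maxlen ~ 1000 limit 100
+//	n, err := rdb.XTrimMaxLenApprox(ctx, "mystream", 1000, 100).Result()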
+
+// XTrimMinID uses no `~` matcher, so `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox: LIMIT is affected by a known redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member, including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
+ }
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
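+
+// Usage sketch (editorial note, assuming a client named `rdb`): ZAddArgs with GT and Ch,
+// i.e. "ZADD zset gt ch 1 a":
+//
+//	n, err := rdb.ZAddArgs(ctx, "zset", ZAddArgs{
+//		GT:      true,
+//		Ch:      true,
+//		Members: []Z{{Score: 1, Member: "a"}},
+//	}).Result()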
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TODO: kept for compatibility with the v8 API; will be removed in v9.
+func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
+ args.Members = make([]Z, len(members))
+ for i, m := range members {
+ args.Members[i] = *m
+ }
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+ return c.zAdd(ctx, key, ZAddArgs{}, members...)
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ return c.zAdd(ctx, key, ZAddArgs{
+ NX: true,
+ }, members...)
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ return c.zAdd(ctx, key, ZAddArgs{
+ XX: true,
+ }, members...)
+}
+
+// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
+// Deprecated: Use
+// client.ZAddArgs(ctx, ZAddArgs{
+// Ch: true,
+// Members: []Z,
+// })
+// remove in v9.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ return c.zAdd(ctx, key, ZAddArgs{
+ Ch: true,
+ }, members...)
+}
+
+// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
+// Deprecated: Use
+// client.ZAddArgs(ctx, ZAddArgs{
+// NX: true,
+// Ch: true,
+// Members: []Z,
+// })
+// remove in v9.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ return c.zAdd(ctx, key, ZAddArgs{
+ NX: true,
+ Ch: true,
+ }, members...)
+}
+
+// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
+// Deprecated: Use
+// client.ZAddArgs(ctx, ZAddArgs{
+// XX: true,
+// Ch: true,
+// Members: []Z,
+// })
+// remove in v9.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ return c.zAdd(ctx, key, ZAddArgs{
+ XX: true,
+ Ch: true,
+ }, members...)
+}
+
+// ZIncr Redis `ZADD key INCR score member` command.
+// Deprecated: Use
+// client.ZAddArgsIncr(ctx, ZAddArgs{
+// Members: []Z,
+// })
+// remove in v9.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+ return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+ Members: []Z{*member},
+ })
+}
+
+// ZIncrNX Redis `ZADD key NX INCR score member` command.
+// Deprecated: Use
+// client.ZAddArgsIncr(ctx, ZAddArgs{
+// NX: true,
+// Members: []Z,
+// })
+// remove in v9.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+ return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+ NX: true,
+ Members: []Z{*member},
+ })
+}
+
+// ZIncrXX Redis `ZADD key XX INCR score member` command.
+// Deprecated: Use
+// client.ZAddArgsIncr(ctx, ZAddArgs{
+// XX: true,
+// Members: []Z,
+// })
+// remove in v9.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+ return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+ XX: true,
+ Members: []Z{*member},
+ })
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
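+
+// Usage sketch (editorial note, assuming a client named `rdb`): ZInterStore with weights and an
+// aggregate, i.e. "ZINTERSTORE dest 2 zset1 zset2 weights 2 3 aggregate MAX":
+//
+//	n, err := rdb.ZInterStore(ctx, "dest", &ZStore{
+//		Keys:      []string{"zset1", "zset2"},
+//		Weights:   []float64{2, 3},
+//		Aggregate: "MAX",
+//	}).Result()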
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRangeArgs holds all the options of the ZRange command.
+// Since redis-server 6.2.0, ZRange can replace the following commands:
+// ZREVRANGE,
+// ZRANGEBYSCORE,
+// ZREVRANGEBYSCORE,
+// ZRANGEBYLEX,
+// ZREVRANGEBYLEX.
+// Please pay attention to your redis-server version.
+//
+// The Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 or higher.
+type ZRangeArgs struct {
+ Key string
+
+// When the ByScore option is provided, an open (exclusive) interval can be set.
+// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+// It is similar to the ZRangeByScore command, which is deprecated as of 6.2.0.
+ // For example:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "(3",
+ // Stop: 8,
+ // ByScore: true,
+ // }
+ // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+ //
+// The ByLex option is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
+ // You can set the <Start> and <Stop> options as follows:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "[abc",
+ // Stop: "(def",
+ // ByLex: true,
+ // }
+ // cmd: "ZRange example-key [abc (def ByLex"
+ //
+ // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+ // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
+ }
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
+ }
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
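+
+// Usage sketch (editorial note, assuming a client named `rdb`): ZRangeByScore with an exclusive
+// lower bound and a LIMIT clause, i.e. "ZRANGEBYSCORE zset (1 +inf limit 0 10":
+//
+//	vals, err := rdb.ZRangeByScore(ctx, "zset", &ZRangeBy{
+//		Min:    "(1",
+//		Max:    "+inf",
+//		Offset: 0,
+//		Count:  10,
+//	}).Result()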
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember requires redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+ args := make([]interface{}, 0, 4)
+
+ // Although count=0 is meaningless, redis accepts count=0.
+ args = append(args, "zrandmember", key, count)
+ if withScores {
+ args = append(args, "withscores")
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff requires redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores requires redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore requires redis-server version >= 6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
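+
+// Illustrative HyperLogLog usage (not part of the upstream source), assuming a
+// *redis.Client named rdb:
+//
+//	_ = rdb.PFAdd(ctx, "hll1", "a", "b", "c").Err()
+//	_ = rdb.PFAdd(ctx, "hll2", "c", "d").Err()
+//	_ = rdb.PFMerge(ctx, "merged", "hll1", "hll2").Err()
+//	count, err := rdb.PFCount(ctx, "merged").Result() // approximately 4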
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter uses the new-style filter syntax, whereas ClientKill uses the old address-based form:
+//
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
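+
+// For example (illustrative, not part of the upstream source), killing the
+// connection of a given client ID could be expressed as:
+//
+//	n, err := rdb.ClientKillByFilter(ctx, "ID", "12345").Result()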
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
+ args := []interface{}{"info"}
+ if len(section) > 0 {
+ args = append(args, section[0])
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
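+
+// Because a successful SHUTDOWN closes the connection, the helper above treats
+// io.EOF as success. A caller-side sketch (illustrative, not part of the
+// upstream source):
+//
+//	if err := rdb.ShutdownNoSave(ctx).Err(); err != nil {
+//		// the server refused to shut down; err carries the reason
+//	}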
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(ctx, "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(_ context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
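+
+// Illustrative usage (not part of the upstream source): MEMORY USAGE with an
+// explicit sample count, assuming rdb is a *redis.Client:
+//
+//	bytes, err := rdb.MemoryUsage(ctx, "big-hash", 5).Result()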
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "eval"
+ cmdArgs[1] = script
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "evalsha"
+ cmdArgs[1] = sha1
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
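+
+// Illustrative scripting usage (not part of the upstream source), assuming a
+// *redis.Client named rdb:
+//
+//	script := "return redis.call('GET', KEYS[1])"
+//	sha, err := rdb.ScriptLoad(ctx, script).Result()
+//	// ... handle err ...
+//	val, err := rdb.EvalSha(ctx, sha, []string{"key1"}).Result()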
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewStringIntMapCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
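+
+// Illustrative usage (not part of the upstream source), assuming rdb is a
+// *redis.Client:
+//
+//	n, err := rdb.GeoAdd(ctx, "Sicily",
+//		&redis.GeoLocation{Name: "Palermo", Longitude: 13.361389, Latitude: 38.115556},
+//		&redis.GeoLocation{Name: "Catania", Longitude: 15.087269, Latitude: 37.502669},
+//	).Result()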
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
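+
+// Illustrative usage (not part of the upstream source): GeoRadius for a plain
+// read-only query, GeoRadiusStore when the result should be written to another key:
+//
+//	locs, err := rdb.GeoRadius(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+//		Radius: 200, Unit: "km", WithCoord: true,
+//	}).Result()
+//
+//	n, err := rdb.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+//		Radius: 200, Unit: "km", Store: "nearby",
+//	}).Result()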
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+ args := make([]interface{}, 0, 13)
+ args = append(args, "geosearch", key)
+ args = geoSearchArgs(q, args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+ ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+ args := make([]interface{}, 0, 16)
+ args = append(args, "geosearch", key)
+ args = geoSearchLocationArgs(q, args)
+ cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+ args := make([]interface{}, 0, 15)
+ args = append(args, "geosearchstore", store, key)
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+ if q.StoreDist {
+ args = append(args, "storedist")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
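+
+// Illustrative usage (not part of the upstream source): GEOSEARCH by member and
+// radius (requires Redis >= 6.2), assuming rdb is a *redis.Client:
+//
+//	names, err := rdb.GeoSearch(ctx, "Sicily", &redis.GeoSearchQuery{
+//		Member:     "Palermo",
+//		Radius:     200,
+//		RadiusUnit: "km",
+//	}).Result()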
+
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go
new file mode 100644
index 0000000..030bdf3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go
@@ -0,0 +1,5522 @@
+package redis_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+var _ = Describe("Commands", func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ Describe("server", func() {
+ It("should Auth", func() {
+ cmds, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Auth(ctx, "password")
+ pipe.Auth(ctx, "")
+ return nil
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("ERR AUTH"))
+ Expect(cmds[0].Err().Error()).To(ContainSubstring("ERR AUTH"))
+ Expect(cmds[1].Err().Error()).To(ContainSubstring("ERR AUTH"))
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(1)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ Expect(stats.TotalConns).To(Equal(uint32(1)))
+ Expect(stats.IdleConns).To(Equal(uint32(1)))
+ })
+
+ It("should Echo", func() {
+ pipe := client.Pipeline()
+ echo := pipe.Echo(ctx, "hello")
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(echo.Err()).NotTo(HaveOccurred())
+ Expect(echo.Val()).To(Equal("hello"))
+ })
+
+ It("should Ping", func() {
+ ping := client.Ping(ctx)
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ })
+
+ It("should Wait", func() {
+ const wait = 3 * time.Second
+
+ // assume the tests run against a single Redis instance with no replicas
+ start := time.Now()
+ val, err := client.Wait(ctx, 1, wait).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(0)))
+ Expect(time.Now()).To(BeTemporally("~", start.Add(wait), 3*time.Second))
+ })
+
+ It("should Select", func() {
+ pipe := client.Pipeline()
+ sel := pipe.Select(ctx, 1)
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(sel.Err()).NotTo(HaveOccurred())
+ Expect(sel.Val()).To(Equal("OK"))
+ })
+
+ It("should SwapDB", func() {
+ pipe := client.Pipeline()
+ sel := pipe.SwapDB(ctx, 1, 2)
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(sel.Err()).NotTo(HaveOccurred())
+ Expect(sel.Val()).To(Equal("OK"))
+ })
+
+ It("should BgRewriteAOF", func() {
+ Skip("flaky test")
+
+ val, err := client.BgRewriteAOF(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(ContainSubstring("Background append only file rewriting"))
+ })
+
+ It("should BgSave", func() {
+ Skip("flaky test")
+
+ // workaround for "ERR Can't BGSAVE while AOF log rewriting is in progress"
+ Eventually(func() string {
+ return client.BgSave(ctx).Val()
+ }, "30s").Should(Equal("Background saving started"))
+ })
+
+ It("should ClientKill", func() {
+ r := client.ClientKill(ctx, "1.1.1.1:1111")
+ Expect(r.Err()).To(MatchError("ERR No such client"))
+ Expect(r.Val()).To(Equal(""))
+ })
+
+ It("should ClientKillByFilter", func() {
+ r := client.ClientKillByFilter(ctx, "TYPE", "test")
+ Expect(r.Err()).To(MatchError("ERR Unknown client type 'test'"))
+ Expect(r.Val()).To(Equal(int64(0)))
+ })
+
+ It("should ClientID", func() {
+ err := client.ClientID(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.ClientID(ctx).Val()).To(BeNumerically(">=", 0))
+ })
+
+ It("should ClientUnblock", func() {
+ id := client.ClientID(ctx).Val()
+ r, err := client.ClientUnblock(ctx, id).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(r).To(Equal(int64(0)))
+ })
+
+ It("should ClientUnblockWithError", func() {
+ id := client.ClientID(ctx).Val()
+ r, err := client.ClientUnblockWithError(ctx, id).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(r).To(Equal(int64(0)))
+ })
+
+ It("should ClientPause", func() {
+ err := client.ClientPause(ctx, time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ start := time.Now()
+ err = client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(time.Now()).To(BeTemporally("~", start.Add(time.Second), 800*time.Millisecond))
+ })
+
+ It("should ClientSetName and ClientGetName", func() {
+ pipe := client.Pipeline()
+ set := pipe.ClientSetName(ctx, "theclientname")
+ get := pipe.ClientGetName(ctx)
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(BeTrue())
+
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("theclientname"))
+ })
+
+ It("should ConfigGet", func() {
+ val, err := client.ConfigGet(ctx, "*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(BeEmpty())
+ })
+
+ It("should ConfigResetStat", func() {
+ r := client.ConfigResetStat(ctx)
+ Expect(r.Err()).NotTo(HaveOccurred())
+ Expect(r.Val()).To(Equal("OK"))
+ })
+
+ It("should ConfigSet", func() {
+ configGet := client.ConfigGet(ctx, "maxmemory")
+ Expect(configGet.Err()).NotTo(HaveOccurred())
+ Expect(configGet.Val()).To(HaveLen(2))
+ Expect(configGet.Val()[0]).To(Equal("maxmemory"))
+
+ configSet := client.ConfigSet(ctx, "maxmemory", configGet.Val()[1].(string))
+ Expect(configSet.Err()).NotTo(HaveOccurred())
+ Expect(configSet.Val()).To(Equal("OK"))
+ })
+
+ It("should ConfigRewrite", func() {
+ configRewrite := client.ConfigRewrite(ctx)
+ Expect(configRewrite.Err()).NotTo(HaveOccurred())
+ Expect(configRewrite.Val()).To(Equal("OK"))
+ })
+
+ It("should DBSize", func() {
+ size, err := client.DBSize(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(0)))
+ })
+
+ It("should Info", func() {
+ info := client.Info(ctx)
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(Equal(""))
+ })
+
+ It("should Info cpu", func() {
+ info := client.Info(ctx, "cpu")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(Equal(""))
+ Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
+ })
+
+ It("should LastSave", func() {
+ lastSave := client.LastSave(ctx)
+ Expect(lastSave.Err()).NotTo(HaveOccurred())
+ Expect(lastSave.Val()).NotTo(Equal(0))
+ })
+
+ It("should Save", func() {
+ // workaround for "ERR Background save already in progress"
+ Eventually(func() string {
+ return client.Save(ctx).Val()
+ }, "10s").Should(Equal("OK"))
+ })
+
+ It("should SlaveOf", func() {
+ slaveOf := client.SlaveOf(ctx, "localhost", "8888")
+ Expect(slaveOf.Err()).NotTo(HaveOccurred())
+ Expect(slaveOf.Val()).To(Equal("OK"))
+
+ slaveOf = client.SlaveOf(ctx, "NO", "ONE")
+ Expect(slaveOf.Err()).NotTo(HaveOccurred())
+ Expect(slaveOf.Val()).To(Equal("OK"))
+ })
+
+ It("should Time", func() {
+ tm, err := client.Time(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(tm).To(BeTemporally("~", time.Now(), 3*time.Second))
+ })
+
+ It("should Command", func() {
+ cmds, err := client.Command(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cmds)).To(BeNumerically("~", 200, 25))
+
+ cmd := cmds["mget"]
+ Expect(cmd.Name).To(Equal("mget"))
+ Expect(cmd.Arity).To(Equal(int8(-2)))
+ Expect(cmd.Flags).To(ContainElement("readonly"))
+ Expect(cmd.FirstKeyPos).To(Equal(int8(1)))
+ Expect(cmd.LastKeyPos).To(Equal(int8(-1)))
+ Expect(cmd.StepCount).To(Equal(int8(1)))
+
+ cmd = cmds["ping"]
+ Expect(cmd.Name).To(Equal("ping"))
+ Expect(cmd.Arity).To(Equal(int8(-1)))
+ Expect(cmd.Flags).To(ContainElement("stale"))
+ Expect(cmd.Flags).To(ContainElement("fast"))
+ Expect(cmd.FirstKeyPos).To(Equal(int8(0)))
+ Expect(cmd.LastKeyPos).To(Equal(int8(0)))
+ Expect(cmd.StepCount).To(Equal(int8(0)))
+ })
+ })
+
+ Describe("debugging", func() {
+ It("should DebugObject", func() {
+ err := client.DebugObject(ctx, "foo").Err()
+ Expect(err).To(MatchError("ERR no such key"))
+
+ err = client.Set(ctx, "foo", "bar", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ s, err := client.DebugObject(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(ContainSubstring("serializedlength:4"))
+ })
+
+ It("should MemoryUsage", func() {
+ err := client.MemoryUsage(ctx, "foo").Err()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.Set(ctx, "foo", "bar", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.MemoryUsage(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).NotTo(BeZero())
+
+ n, err = client.MemoryUsage(ctx, "foo", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).NotTo(BeZero())
+ })
+ })
+
+ Describe("keys", func() {
+ It("should Del", func() {
+ err := client.Set(ctx, "key1", "Hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.Set(ctx, "key2", "World", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.Del(ctx, "key1", "key2", "key3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should Unlink", func() {
+ err := client.Set(ctx, "key1", "Hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.Set(ctx, "key2", "World", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.Unlink(ctx, "key1", "key2", "key3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should Dump", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ dump := client.Dump(ctx, "key")
+ Expect(dump.Err()).NotTo(HaveOccurred())
+ Expect(dump.Val()).NotTo(BeEmpty())
+ })
+
+ It("should Exists", func() {
+ set := client.Set(ctx, "key1", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ n, err := client.Exists(ctx, "key1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Exists(ctx, "key2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+
+ n, err = client.Exists(ctx, "key1", "key2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Exists(ctx, "key1", "key1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should Expire", func() {
+ set := client.Set(ctx, "key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expire := client.Expire(ctx, "key", 10*time.Second)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(expire.Val()).To(Equal(true))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(10 * time.Second))
+
+ set = client.Set(ctx, "key", "Hello World", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ ttl = client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(time.Duration(-1)))
+
+ ttl = client.TTL(ctx, "nonexistent_key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(time.Duration(-2)))
+ })
+
+ It("should ExpireAt", func() {
+ set := client.Set(ctx, "key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ n, err := client.Exists(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ expireAt := client.ExpireAt(ctx, "key", time.Now().Add(-time.Hour))
+ Expect(expireAt.Err()).NotTo(HaveOccurred())
+ Expect(expireAt.Val()).To(Equal(true))
+
+ n, err = client.Exists(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+ })
+
+ It("should Keys", func() {
+ mset := client.MSet(ctx, "one", "1", "two", "2", "three", "3", "four", "4")
+ Expect(mset.Err()).NotTo(HaveOccurred())
+ Expect(mset.Val()).To(Equal("OK"))
+
+ keys := client.Keys(ctx, "*o*")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "two"}))
+
+ keys = client.Keys(ctx, "t??")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(keys.Val()).To(Equal([]string{"two"}))
+
+ keys = client.Keys(ctx, "*")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "three", "two"}))
+ })
+
+ It("should Migrate", func() {
+ migrate := client.Migrate(ctx, "localhost", redisSecondaryPort, "key", 0, 0)
+ Expect(migrate.Err()).NotTo(HaveOccurred())
+ Expect(migrate.Val()).To(Equal("NOKEY"))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ migrate = client.Migrate(ctx, "localhost", redisSecondaryPort, "key", 0, 0)
+ Expect(migrate.Err()).To(MatchError("IOERR error or timeout writing to target instance"))
+ Expect(migrate.Val()).To(Equal(""))
+ })
+
+ It("should Move", func() {
+ move := client.Move(ctx, "key", 2)
+ Expect(move.Err()).NotTo(HaveOccurred())
+ Expect(move.Val()).To(Equal(false))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ move = client.Move(ctx, "key", 2)
+ Expect(move.Err()).NotTo(HaveOccurred())
+ Expect(move.Val()).To(Equal(true))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ pipe := client.Pipeline()
+ pipe.Select(ctx, 2)
+ get = pipe.Get(ctx, "key")
+ pipe.FlushDB(ctx)
+
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should Object", func() {
+ start := time.Now()
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ refCount := client.ObjectRefCount(ctx, "key")
+ Expect(refCount.Err()).NotTo(HaveOccurred())
+ Expect(refCount.Val()).To(Equal(int64(1)))
+
+ err := client.ObjectEncoding(ctx, "key").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ idleTime := client.ObjectIdleTime(ctx, "key")
+ Expect(idleTime.Err()).NotTo(HaveOccurred())
+
+ // Redis computes the idle time as milliseconds/1000, so the value can sit right
+ // at a second boundary; allow an extra +1s to deal with that edge case.
+ // If command execution itself takes more than 1s, the test may also fail,
+ // hence the assertion: ObjectIdleTime <= now-start+1s.
+ // link: https://github.com/redis/redis/blob/5b48d900498c85bbf4772c1d466c214439888115/src/object.c#L1265-L1272
+ Expect(idleTime.Val()).To(BeNumerically("<=", time.Now().Sub(start)+time.Second))
+ })
+
+ It("should Persist", func() {
+ set := client.Set(ctx, "key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expire := client.Expire(ctx, "key", 10*time.Second)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(expire.Val()).To(Equal(true))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(10 * time.Second))
+
+ persist := client.Persist(ctx, "key")
+ Expect(persist.Err()).NotTo(HaveOccurred())
+ Expect(persist.Val()).To(Equal(true))
+
+ ttl = client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val() < 0).To(Equal(true))
+ })
+
+ It("should PExpire", func() {
+ set := client.Set(ctx, "key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expiration := 900 * time.Millisecond
+ pexpire := client.PExpire(ctx, "key", expiration)
+ Expect(pexpire.Err()).NotTo(HaveOccurred())
+ Expect(pexpire.Val()).To(Equal(true))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(time.Second))
+
+ pttl := client.PTTL(ctx, "key")
+ Expect(pttl.Err()).NotTo(HaveOccurred())
+ Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
+ })
+
+ It("should PExpireAt", func() {
+ set := client.Set(ctx, "key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expiration := 900 * time.Millisecond
+ pexpireat := client.PExpireAt(ctx, "key", time.Now().Add(expiration))
+ Expect(pexpireat.Err()).NotTo(HaveOccurred())
+ Expect(pexpireat.Val()).To(Equal(true))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(time.Second))
+
+ pttl := client.PTTL(ctx, "key")
+ Expect(pttl.Err()).NotTo(HaveOccurred())
+ Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
+ })
+
+ It("should PTTL", func() {
+ set := client.Set(ctx, "key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expiration := time.Second
+ expire := client.Expire(ctx, "key", expiration)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ pttl := client.PTTL(ctx, "key")
+ Expect(pttl.Err()).NotTo(HaveOccurred())
+ Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
+ })
+
+ It("should RandomKey", func() {
+ randomKey := client.RandomKey(ctx)
+ Expect(randomKey.Err()).To(Equal(redis.Nil))
+ Expect(randomKey.Val()).To(Equal(""))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ randomKey = client.RandomKey(ctx)
+ Expect(randomKey.Err()).NotTo(HaveOccurred())
+ Expect(randomKey.Val()).To(Equal("key"))
+ })
+
+ It("should Rename", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ status := client.Rename(ctx, "key", "key1")
+ Expect(status.Err()).NotTo(HaveOccurred())
+ Expect(status.Val()).To(Equal("OK"))
+
+ get := client.Get(ctx, "key1")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should RenameNX", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ renameNX := client.RenameNX(ctx, "key", "key1")
+ Expect(renameNX.Err()).NotTo(HaveOccurred())
+ Expect(renameNX.Val()).To(Equal(true))
+
+ get := client.Get(ctx, "key1")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should Restore", func() {
+ err := client.Set(ctx, "key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ dump := client.Dump(ctx, "key")
+ Expect(dump.Err()).NotTo(HaveOccurred())
+
+ err = client.Del(ctx, "key").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ restore, err := client.Restore(ctx, "key", 0, dump.Val()).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(restore).To(Equal("OK"))
+
+ type_, err := client.Type(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(type_).To(Equal("string"))
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should RestoreReplace", func() {
+ err := client.Set(ctx, "key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ dump := client.Dump(ctx, "key")
+ Expect(dump.Err()).NotTo(HaveOccurred())
+
+ restore, err := client.RestoreReplace(ctx, "key", 0, dump.Val()).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(restore).To(Equal("OK"))
+
+ type_, err := client.Type(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(type_).To(Equal("string"))
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should Sort", func() {
+ size, err := client.LPush(ctx, "list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush(ctx, "list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush(ctx, "list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ els, err := client.Sort(ctx, "list", &redis.Sort{
+ Offset: 0,
+ Count: 2,
+ Order: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"1", "2"}))
+ })
+
+ It("should Sort and Get", func() {
+ size, err := client.LPush(ctx, "list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush(ctx, "list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush(ctx, "list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ err = client.Set(ctx, "object_2", "value2", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ {
+ els, err := client.Sort(ctx, "list", &redis.Sort{
+ Get: []string{"object_*"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"", "value2", ""}))
+ }
+
+ {
+ els, err := client.SortInterfaces(ctx, "list", &redis.Sort{
+ Get: []string{"object_*"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]interface{}{nil, "value2", nil}))
+ }
+ })
+
+ It("should Sort and Store", func() {
+ size, err := client.LPush(ctx, "list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush(ctx, "list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush(ctx, "list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ n, err := client.SortStore(ctx, "list", "list2", &redis.Sort{
+ Offset: 0,
+ Count: 2,
+ Order: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+
+ els, err := client.LRange(ctx, "list2", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"1", "2"}))
+ })
+
+ It("should Touch", func() {
+ set1 := client.Set(ctx, "touch1", "hello", 0)
+ Expect(set1.Err()).NotTo(HaveOccurred())
+ Expect(set1.Val()).To(Equal("OK"))
+
+ set2 := client.Set(ctx, "touch2", "hello", 0)
+ Expect(set2.Err()).NotTo(HaveOccurred())
+ Expect(set2.Val()).To(Equal("OK"))
+
+ touch := client.Touch(ctx, "touch1", "touch2", "touch3")
+ Expect(touch.Err()).NotTo(HaveOccurred())
+ Expect(touch.Val()).To(Equal(int64(2)))
+ })
+
+ It("should TTL", func() {
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val() < 0).To(Equal(true))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expire := client.Expire(ctx, "key", 60*time.Second)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(expire.Val()).To(Equal(true))
+
+ ttl = client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(60 * time.Second))
+ })
+
+ It("should Type", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ type_ := client.Type(ctx, "key")
+ Expect(type_.Err()).NotTo(HaveOccurred())
+ Expect(type_.Val()).To(Equal("string"))
+ })
+ })
+
+ Describe("scanning", func() {
+ It("should Scan", func() {
+ for i := 0; i < 1000; i++ {
+ set := client.Set(ctx, fmt.Sprintf("key%d", i), "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.Scan(ctx, 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should ScanType", func() {
+ for i := 0; i < 1000; i++ {
+ set := client.Set(ctx, fmt.Sprintf("key%d", i), "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.ScanType(ctx, 0, "", 0, "string").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should SScan", func() {
+ for i := 0; i < 1000; i++ {
+ sadd := client.SAdd(ctx, "myset", fmt.Sprintf("member%d", i))
+ Expect(sadd.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.SScan(ctx, "myset", 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should HScan", func() {
+ for i := 0; i < 1000; i++ {
+ sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello")
+ Expect(sadd.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.HScan(ctx, "myhash", 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should ZScan", func() {
+ for i := 0; i < 1000; i++ {
+ err := client.ZAdd(ctx, "myset", &redis.Z{
+ Score: float64(i),
+ Member: fmt.Sprintf("member%d", i),
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.ZScan(ctx, "myset", 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+ })
+
+ Describe("strings", func() {
+ It("should Append", func() {
+ n, err := client.Exists(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+
+ append := client.Append(ctx, "key", "Hello")
+ Expect(append.Err()).NotTo(HaveOccurred())
+ Expect(append.Val()).To(Equal(int64(5)))
+
+ append = client.Append(ctx, "key", " World")
+ Expect(append.Err()).NotTo(HaveOccurred())
+ Expect(append.Val()).To(Equal(int64(11)))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("Hello World"))
+ })
+
+ It("should BitCount", func() {
+ set := client.Set(ctx, "key", "foobar", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitCount := client.BitCount(ctx, "key", nil)
+ Expect(bitCount.Err()).NotTo(HaveOccurred())
+ Expect(bitCount.Val()).To(Equal(int64(26)))
+
+ bitCount = client.BitCount(ctx, "key", &redis.BitCount{
+ Start: 0,
+ End: 0,
+ })
+ Expect(bitCount.Err()).NotTo(HaveOccurred())
+ Expect(bitCount.Val()).To(Equal(int64(4)))
+
+ bitCount = client.BitCount(ctx, "key", &redis.BitCount{
+ Start: 1,
+ End: 1,
+ })
+ Expect(bitCount.Err()).NotTo(HaveOccurred())
+ Expect(bitCount.Val()).To(Equal(int64(6)))
+ })
+
+ It("should BitOpAnd", func() {
+ set := client.Set(ctx, "key1", "1", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ set = client.Set(ctx, "key2", "0", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpAnd := client.BitOpAnd(ctx, "dest", "key1", "key2")
+ Expect(bitOpAnd.Err()).NotTo(HaveOccurred())
+ Expect(bitOpAnd.Val()).To(Equal(int64(1)))
+
+ get := client.Get(ctx, "dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("0"))
+ })
+
+ It("should BitOpOr", func() {
+ set := client.Set(ctx, "key1", "1", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ set = client.Set(ctx, "key2", "0", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpOr := client.BitOpOr(ctx, "dest", "key1", "key2")
+ Expect(bitOpOr.Err()).NotTo(HaveOccurred())
+ Expect(bitOpOr.Val()).To(Equal(int64(1)))
+
+ get := client.Get(ctx, "dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("1"))
+ })
+
+ It("should BitOpXor", func() {
+ set := client.Set(ctx, "key1", "\xff", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ set = client.Set(ctx, "key2", "\x0f", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpXor := client.BitOpXor(ctx, "dest", "key1", "key2")
+ Expect(bitOpXor.Err()).NotTo(HaveOccurred())
+ Expect(bitOpXor.Val()).To(Equal(int64(1)))
+
+ get := client.Get(ctx, "dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("\xf0"))
+ })
+
+ It("should BitOpNot", func() {
+ set := client.Set(ctx, "key1", "\x00", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpNot := client.BitOpNot(ctx, "dest", "key1")
+ Expect(bitOpNot.Err()).NotTo(HaveOccurred())
+ Expect(bitOpNot.Val()).To(Equal(int64(1)))
+
+ get := client.Get(ctx, "dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("\xff"))
+ })
+
+ It("should BitPos", func() {
+ err := client.Set(ctx, "mykey", "\xff\xf0\x00", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ pos, err := client.BitPos(ctx, "mykey", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(12)))
+
+ pos, err = client.BitPos(ctx, "mykey", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(0)))
+
+ pos, err = client.BitPos(ctx, "mykey", 0, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(16)))
+
+ pos, err = client.BitPos(ctx, "mykey", 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos(ctx, "mykey", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(16)))
+
+ pos, err = client.BitPos(ctx, "mykey", 1, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos(ctx, "mykey", 0, 2, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos(ctx, "mykey", 0, 0, -3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos(ctx, "mykey", 0, 0, 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+ })
+
+ It("should BitField", func() {
+ nn, err := client.BitField(ctx, "mykey", "INCRBY", "i5", 100, 1, "GET", "u4", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{1, 0}))
+ })
+
+ It("should Decr", func() {
+ set := client.Set(ctx, "key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ decr := client.Decr(ctx, "key")
+ Expect(decr.Err()).NotTo(HaveOccurred())
+ Expect(decr.Val()).To(Equal(int64(9)))
+
+ set = client.Set(ctx, "key", "234293482390480948029348230948", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ decr = client.Decr(ctx, "key")
+ Expect(decr.Err()).To(MatchError("ERR value is not an integer or out of range"))
+ Expect(decr.Val()).To(Equal(int64(0)))
+ })
+
+ It("should DecrBy", func() {
+ set := client.Set(ctx, "key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ decrBy := client.DecrBy(ctx, "key", 5)
+ Expect(decrBy.Err()).NotTo(HaveOccurred())
+ Expect(decrBy.Val()).To(Equal(int64(5)))
+ })
+
+ It("should Get", func() {
+ get := client.Get(ctx, "_")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get = client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should GetBit", func() {
+ setBit := client.SetBit(ctx, "key", 7, 1)
+ Expect(setBit.Err()).NotTo(HaveOccurred())
+ Expect(setBit.Val()).To(Equal(int64(0)))
+
+ getBit := client.GetBit(ctx, "key", 0)
+ Expect(getBit.Err()).NotTo(HaveOccurred())
+ Expect(getBit.Val()).To(Equal(int64(0)))
+
+ getBit = client.GetBit(ctx, "key", 7)
+ Expect(getBit.Err()).NotTo(HaveOccurred())
+ Expect(getBit.Val()).To(Equal(int64(1)))
+
+ getBit = client.GetBit(ctx, "key", 100)
+ Expect(getBit.Err()).NotTo(HaveOccurred())
+ Expect(getBit.Val()).To(Equal(int64(0)))
+ })
+
+ It("should GetRange", func() {
+ set := client.Set(ctx, "key", "This is a string", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ getRange := client.GetRange(ctx, "key", 0, 3)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("This"))
+
+ getRange = client.GetRange(ctx, "key", -3, -1)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("ing"))
+
+ getRange = client.GetRange(ctx, "key", 0, -1)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("This is a string"))
+
+ getRange = client.GetRange(ctx, "key", 10, 100)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("string"))
+ })
+
+ It("should GetSet", func() {
+ incr := client.Incr(ctx, "key")
+ Expect(incr.Err()).NotTo(HaveOccurred())
+ Expect(incr.Val()).To(Equal(int64(1)))
+
+ getSet := client.GetSet(ctx, "key", "0")
+ Expect(getSet.Err()).NotTo(HaveOccurred())
+ Expect(getSet.Val()).To(Equal("1"))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("0"))
+ })
+
+ It("should GetEX", func() {
+ set := client.Set(ctx, "key", "value", 100*time.Second)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(BeNumerically("~", 100*time.Second, 3*time.Second))
+
+ getEX := client.GetEx(ctx, "key", 200*time.Second)
+ Expect(getEX.Err()).NotTo(HaveOccurred())
+ Expect(getEX.Val()).To(Equal("value"))
+
+ ttl = client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(BeNumerically("~", 200*time.Second, 3*time.Second))
+ })
+
+ It("should GetDel", func() {
+ set := client.Set(ctx, "key", "value", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ getDel := client.GetDel(ctx, "key")
+ Expect(getDel.Err()).NotTo(HaveOccurred())
+ Expect(getDel.Val()).To(Equal("value"))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ })
+
+ It("should Incr", func() {
+ set := client.Set(ctx, "key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incr := client.Incr(ctx, "key")
+ Expect(incr.Err()).NotTo(HaveOccurred())
+ Expect(incr.Val()).To(Equal(int64(11)))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("11"))
+ })
+
+ It("should IncrBy", func() {
+ set := client.Set(ctx, "key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incrBy := client.IncrBy(ctx, "key", 5)
+ Expect(incrBy.Err()).NotTo(HaveOccurred())
+ Expect(incrBy.Val()).To(Equal(int64(15)))
+ })
+
+ It("should IncrByFloat", func() {
+ set := client.Set(ctx, "key", "10.50", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incrByFloat := client.IncrByFloat(ctx, "key", 0.1)
+ Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(incrByFloat.Val()).To(Equal(10.6))
+
+ set = client.Set(ctx, "key", "5.0e3", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incrByFloat = client.IncrByFloat(ctx, "key", 2.0e2)
+ Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(incrByFloat.Val()).To(Equal(float64(5200)))
+ })
+
+ It("should IncrByFloatOverflow", func() {
+ incrByFloat := client.IncrByFloat(ctx, "key", 996945661)
+ Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(incrByFloat.Val()).To(Equal(float64(996945661)))
+ })
+
+ It("should MSetMGet", func() {
+ mSet := client.MSet(ctx, "key1", "hello1", "key2", "hello2")
+ Expect(mSet.Err()).NotTo(HaveOccurred())
+ Expect(mSet.Val()).To(Equal("OK"))
+
+ mGet := client.MGet(ctx, "key1", "key2", "_")
+ Expect(mGet.Err()).NotTo(HaveOccurred())
+ Expect(mGet.Val()).To(Equal([]interface{}{"hello1", "hello2", nil}))
+ })
+
+ It("should scan Mget", func() {
+ err := client.MSet(ctx, "key1", "hello1", "key2", 123).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ res := client.MGet(ctx, "key1", "key2", "_")
+ Expect(res.Err()).NotTo(HaveOccurred())
+
+ type data struct {
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ }
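+ // The redis struct tags drive the mapping: MGet returns the values in the
+ // order requested and Scan matches each requested key against the tags,
+ // converting the raw reply to the field's type (here "123" -> int).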
+ var d data
+ Expect(res.Scan(&d)).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{Key1: "hello1", Key2: 123}))
+ })
+
+ It("should MSetNX", func() {
+ mSetNX := client.MSetNX(ctx, "key1", "hello1", "key2", "hello2")
+ Expect(mSetNX.Err()).NotTo(HaveOccurred())
+ Expect(mSetNX.Val()).To(Equal(true))
+
+ mSetNX = client.MSetNX(ctx, "key2", "hello1", "key3", "hello2")
+ Expect(mSetNX.Err()).NotTo(HaveOccurred())
+ Expect(mSetNX.Val()).To(Equal(false))
+ })
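+
+ // The SetWithArgs tests below exercise how redis.SetArgs maps to SET options:
+ // TTL / ExpireAt translate to an expiration option (EX/PX/EXAT), Mode "nx"/"xx"
+ // become NX/XX, Get adds GET and KeepTTL adds KEEPTTL. On servers that predate
+ // Redis 7.0, combining NX with GET is rejected, hence the "ERR syntax error"
+ // expectations.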
+
+ It("should SetWithArgs with TTL", func() {
+ args := redis.SetArgs{
+ TTL: 500 * time.Millisecond,
+ }
+ err := client.SetArgs(ctx, "key", "hello", args).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+
+ Eventually(func() error {
+ return client.Get(ctx, "key").Err()
+ }, "2s", "100ms").Should(Equal(redis.Nil))
+ })
+
+ It("should SetWithArgs with expiration date", func() {
+ expireAt := time.Now().AddDate(1, 1, 1)
+ args := redis.SetArgs{
+ ExpireAt: expireAt,
+ }
+ err := client.SetArgs(ctx, "key", "hello", args).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+
+ // check that the key has an expiration date
+ // (i.e. a TTL different from -1)
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).ToNot(Equal(-1))
+ })
+
+ It("should SetWithArgs with negative expiration date", func() {
+ args := redis.SetArgs{
+ ExpireAt: time.Now().AddDate(-3, 1, 1),
+ }
+ // Redis accepts a timestamp in the past,
+ // but a subsequent GET on the key returns nil
+ err := client.SetArgs(ctx, "key", "hello", args).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with keepttl", func() {
+ // Set with ttl
+ argsWithTTL := redis.SetArgs{
+ TTL: 5 * time.Second,
+ }
+ set := client.SetArgs(ctx, "key", "hello", argsWithTTL)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Result()).To(Equal("OK"))
+
+ // Set with keepttl
+ argsWithKeepTTL := redis.SetArgs{
+ KeepTTL: true,
+ }
+ set = client.SetArgs(ctx, "key", "hello", argsWithKeepTTL)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Result()).To(Equal("OK"))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ // setting KEEPTTL retains the TTL already associated with the key
+ Expect(ttl.Val().Nanoseconds()).NotTo(Equal(-1))
+ })
+
+ It("should SetWithArgs with NX mode and key exists", func() {
+ err := client.Set(ctx, "key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ Mode: "nx",
+ }
+ val, err := client.SetArgs(ctx, "key", "hello", args).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with NX mode and key does not exist", func() {
+ args := redis.SetArgs{
+ Mode: "nx",
+ }
+ val, err := client.SetArgs(ctx, "key", "hello", args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("OK"))
+ })
+
+ It("should SetWithArgs with NX mode and GET option", func() {
+ args := redis.SetArgs{
+ Mode: "nx",
+ Get: true,
+ }
+ val, err := client.SetArgs(ctx, "key", "hello", args).Result()
+ Expect(err).To(Equal(proto.RedisError("ERR syntax error")))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with expiration, NX mode, and key does not exist", func() {
+ args := redis.SetArgs{
+ TTL: 500 * time.Millisecond,
+ Mode: "nx",
+ }
+ val, err := client.SetArgs(ctx, "key", "hello", args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("OK"))
+
+ Eventually(func() error {
+ return client.Get(ctx, "key").Err()
+ }, "1s", "100ms").Should(Equal(redis.Nil))
+ })
+
+ It("should SetWithArgs with expiration, NX mode, and key exists", func() {
+ e := client.Set(ctx, "key", "hello", 0)
+ Expect(e.Err()).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ TTL: 500 * time.Millisecond,
+ Mode: "nx",
+ }
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with expiration, NX mode, and GET option", func() {
+ args := redis.SetArgs{
+ TTL: 500 * time.Millisecond,
+ Mode: "nx",
+ Get: true,
+ }
+ val, err := client.SetArgs(ctx, "key", "hello", args).Result()
+ Expect(err).To(Equal(proto.RedisError("ERR syntax error")))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with XX mode and key does not exist", func() {
+ args := redis.SetArgs{
+ Mode: "xx",
+ }
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with XX mode and key exists", func() {
+ e := client.Set(ctx, "key", "hello", 0).Err()
+ Expect(e).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ Mode: "xx",
+ }
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("OK"))
+ })
+
+ It("should SetWithArgs with XX mode and GET option, and key exists", func() {
+ e := client.Set(ctx, "key", "hello", 0).Err()
+ Expect(e).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ Mode: "xx",
+ Get: true,
+ }
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should SetWithArgs with XX mode and GET option, and key does not exist", func() {
+ args := redis.SetArgs{
+ Mode: "xx",
+ Get: true,
+ }
+
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with expiration, XX mode, GET option, and key does not exist", func() {
+ args := redis.SetArgs{
+ TTL: 500 * time.Millisecond,
+ Mode: "xx",
+ Get: true,
+ }
+
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with expiration, XX mode, GET option, and key exists", func() {
+ e := client.Set(ctx, "key", "hello", 0)
+ Expect(e.Err()).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ TTL: 500 * time.Millisecond,
+ Mode: "xx",
+ Get: true,
+ }
+
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+
+ Eventually(func() error {
+ return client.Get(ctx, "key").Err()
+ }, "1s", "100ms").Should(Equal(redis.Nil))
+ })
+
+ It("should SetWithArgs with Get and key does not exist yet", func() {
+ args := redis.SetArgs{
+ Get: true,
+ }
+
+ val, err := client.SetArgs(ctx, "key", "hello", args).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+ })
+
+ It("should SetWithArgs with Get and key exists", func() {
+ e := client.Set(ctx, "key", "hello", 0)
+ Expect(e.Err()).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ Get: true,
+ }
+
+ val, err := client.SetArgs(ctx, "key", "world", args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should Pipelined SetArgs with Get and key exists", func() {
+ e := client.Set(ctx, "key", "hello", 0)
+ Expect(e.Err()).NotTo(HaveOccurred())
+
+ args := redis.SetArgs{
+ Get: true,
+ }
+
+ pipe := client.Pipeline()
+ setArgs := pipe.SetArgs(ctx, "key", "world", args)
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(setArgs.Err()).NotTo(HaveOccurred())
+ Expect(setArgs.Val()).To(Equal("hello"))
+ })
+
+ It("should Set with expiration", func() {
+ err := client.Set(ctx, "key", "hello", 100*time.Millisecond).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+
+ Eventually(func() error {
+ return client.Get(ctx, "key").Err()
+ }, "1s", "100ms").Should(Equal(redis.Nil))
+ })
+
+ It("should Set with keepttl", func() {
+ // set with ttl
+ set := client.Set(ctx, "key", "hello", 5*time.Second)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ // set with keepttl
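+		// redis.KeepTTL is a sentinel expiration value that asks Redis to
+		// keep the key's existing TTL (the KEEPTTL option) instead of clearing it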
+ set = client.Set(ctx, "key", "hello1", redis.KeepTTL)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+		// SET with KEEPTTL retains the TTL associated with the key
+		Expect(ttl.Val().Nanoseconds()).NotTo(Equal(int64(-1)))
+ })
+
+ It("should SetGet", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should SetEX", func() {
+ err := client.SetEX(ctx, "key", "hello", 1*time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+
+ Eventually(func() error {
+ return client.Get(ctx, "foo").Err()
+ }, "2s", "100ms").Should(Equal(redis.Nil))
+ })
+
+ It("should SetNX", func() {
+ setNX := client.SetNX(ctx, "key", "hello", 0)
+ Expect(setNX.Err()).NotTo(HaveOccurred())
+ Expect(setNX.Val()).To(Equal(true))
+
+ setNX = client.SetNX(ctx, "key", "hello2", 0)
+ Expect(setNX.Err()).NotTo(HaveOccurred())
+ Expect(setNX.Val()).To(Equal(false))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should SetNX with expiration", func() {
+ isSet, err := client.SetNX(ctx, "key", "hello", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ isSet, err = client.SetNX(ctx, "key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should SetNX with keepttl", func() {
+ isSet, err := client.SetNX(ctx, "key", "hello1", redis.KeepTTL).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ ttl := client.TTL(ctx, "key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val().Nanoseconds()).To(Equal(int64(-1)))
+ })
+
+ It("should SetXX", func() {
+ isSet, err := client.SetXX(ctx, "key", "hello2", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ err = client.Set(ctx, "key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ isSet, err = client.SetXX(ctx, "key", "hello2", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello2"))
+ })
+
+ It("should SetXX with expiration", func() {
+ isSet, err := client.SetXX(ctx, "key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ err = client.Set(ctx, "key", "hello", time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ isSet, err = client.SetXX(ctx, "key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello2"))
+ })
+
+ It("should SetXX with keepttl", func() {
+ isSet, err := client.SetXX(ctx, "key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ err = client.Set(ctx, "key", "hello", time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ isSet, err = client.SetXX(ctx, "key", "hello2", 5*time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ isSet, err = client.SetXX(ctx, "key", "hello3", redis.KeepTTL).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ val, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello3"))
+
+		// SET with KEEPTTL retains the TTL associated with the key
+		ttl, err := client.TTL(ctx, "key").Result()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(ttl).NotTo(Equal(time.Duration(-1)))
+ })
+
+ It("should SetRange", func() {
+ set := client.Set(ctx, "key", "Hello World", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ range_ := client.SetRange(ctx, "key", 6, "Redis")
+ Expect(range_.Err()).NotTo(HaveOccurred())
+ Expect(range_.Val()).To(Equal(int64(11)))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("Hello Redis"))
+ })
+
+ It("should StrLen", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ strLen := client.StrLen(ctx, "key")
+ Expect(strLen.Err()).NotTo(HaveOccurred())
+ Expect(strLen.Val()).To(Equal(int64(5)))
+
+ strLen = client.StrLen(ctx, "_")
+ Expect(strLen.Err()).NotTo(HaveOccurred())
+ Expect(strLen.Val()).To(Equal(int64(0)))
+ })
+
+ It("should Copy", func() {
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
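+		// Copy the value to newKey in the same database; with replace=false
+		// an existing destination key is not overwritten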
+ copy := client.Copy(ctx, "key", "newKey", redisOptions().DB, false)
+ Expect(copy.Err()).NotTo(HaveOccurred())
+ Expect(copy.Val()).To(Equal(int64(1)))
+
+		// The value is now available under both keys
+ getOld := client.Get(ctx, "key")
+ Expect(getOld.Err()).NotTo(HaveOccurred())
+ Expect(getOld.Val()).To(Equal("hello"))
+ getNew := client.Get(ctx, "newKey")
+ Expect(getNew.Err()).NotTo(HaveOccurred())
+ Expect(getNew.Val()).To(Equal("hello"))
+
+ // Overwriting an existing key should not succeed
+ overwrite := client.Copy(ctx, "newKey", "key", redisOptions().DB, false)
+ Expect(overwrite.Val()).To(Equal(int64(0)))
+
+		// Overwrite is allowed when replace=true
+ replace := client.Copy(ctx, "newKey", "key", redisOptions().DB, true)
+ Expect(replace.Val()).To(Equal(int64(1)))
+ })
+ })
+
+ Describe("hashes", func() {
+ It("should HDel", func() {
+ hSet := client.HSet(ctx, "hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hDel := client.HDel(ctx, "hash", "key")
+ Expect(hDel.Err()).NotTo(HaveOccurred())
+ Expect(hDel.Val()).To(Equal(int64(1)))
+
+ hDel = client.HDel(ctx, "hash", "key")
+ Expect(hDel.Err()).NotTo(HaveOccurred())
+ Expect(hDel.Val()).To(Equal(int64(0)))
+ })
+
+ It("should HExists", func() {
+ hSet := client.HSet(ctx, "hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hExists := client.HExists(ctx, "hash", "key")
+ Expect(hExists.Err()).NotTo(HaveOccurred())
+ Expect(hExists.Val()).To(Equal(true))
+
+ hExists = client.HExists(ctx, "hash", "key1")
+ Expect(hExists.Err()).NotTo(HaveOccurred())
+ Expect(hExists.Val()).To(Equal(false))
+ })
+
+ It("should HGet", func() {
+ hSet := client.HSet(ctx, "hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hGet := client.HGet(ctx, "hash", "key")
+ Expect(hGet.Err()).NotTo(HaveOccurred())
+ Expect(hGet.Val()).To(Equal("hello"))
+
+ hGet = client.HGet(ctx, "hash", "key1")
+ Expect(hGet.Err()).To(Equal(redis.Nil))
+ Expect(hGet.Val()).To(Equal(""))
+ })
+
+ It("should HGetAll", func() {
+ err := client.HSet(ctx, "hash", "key1", "hello1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.HSet(ctx, "hash", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ m, err := client.HGetAll(ctx, "hash").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(m).To(Equal(map[string]string{"key1": "hello1", "key2": "hello2"}))
+ })
+
+ It("should scan", func() {
+ err := client.HMSet(ctx, "hash", "key1", "hello1", "key2", 123).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ res := client.HGetAll(ctx, "hash")
+ Expect(res.Err()).NotTo(HaveOccurred())
+
+ type data struct {
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ }
+ var d data
+ Expect(res.Scan(&d)).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{Key1: "hello1", Key2: 123}))
+ })
+
+ It("should HIncrBy", func() {
+ hSet := client.HSet(ctx, "hash", "key", "5")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hIncrBy := client.HIncrBy(ctx, "hash", "key", 1)
+ Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(hIncrBy.Val()).To(Equal(int64(6)))
+
+ hIncrBy = client.HIncrBy(ctx, "hash", "key", -1)
+ Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(hIncrBy.Val()).To(Equal(int64(5)))
+
+ hIncrBy = client.HIncrBy(ctx, "hash", "key", -10)
+ Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(hIncrBy.Val()).To(Equal(int64(-5)))
+ })
+
+ It("should HIncrByFloat", func() {
+ hSet := client.HSet(ctx, "hash", "field", "10.50")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(1)))
+
+ hIncrByFloat := client.HIncrByFloat(ctx, "hash", "field", 0.1)
+ Expect(hIncrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(hIncrByFloat.Val()).To(Equal(10.6))
+
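+		// HSET returns the number of fields added, so updating an
+		// existing field reports 0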
+ hSet = client.HSet(ctx, "hash", "field", "5.0e3")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(0)))
+
+ hIncrByFloat = client.HIncrByFloat(ctx, "hash", "field", 2.0e2)
+ Expect(hIncrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(hIncrByFloat.Val()).To(Equal(float64(5200)))
+ })
+
+ It("should HKeys", func() {
+ hkeys := client.HKeys(ctx, "hash")
+ Expect(hkeys.Err()).NotTo(HaveOccurred())
+ Expect(hkeys.Val()).To(Equal([]string{}))
+
+ hset := client.HSet(ctx, "hash", "key1", "hello1")
+ Expect(hset.Err()).NotTo(HaveOccurred())
+ hset = client.HSet(ctx, "hash", "key2", "hello2")
+ Expect(hset.Err()).NotTo(HaveOccurred())
+
+ hkeys = client.HKeys(ctx, "hash")
+ Expect(hkeys.Err()).NotTo(HaveOccurred())
+ Expect(hkeys.Val()).To(Equal([]string{"key1", "key2"}))
+ })
+
+ It("should HLen", func() {
+ hSet := client.HSet(ctx, "hash", "key1", "hello1")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ hSet = client.HSet(ctx, "hash", "key2", "hello2")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hLen := client.HLen(ctx, "hash")
+ Expect(hLen.Err()).NotTo(HaveOccurred())
+ Expect(hLen.Val()).To(Equal(int64(2)))
+ })
+
+ It("should HMGet", func() {
+ err := client.HSet(ctx, "hash", "key1", "hello1", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.HMGet(ctx, "hash", "key1", "key2", "_").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{"hello1", "hello2", nil}))
+ })
+
+ It("should HSet", func() {
+ ok, err := client.HSet(ctx, "hash", map[string]interface{}{
+ "key1": "hello1",
+ "key2": "hello2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ok).To(Equal(int64(2)))
+
+ v, err := client.HGet(ctx, "hash", "key1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("hello1"))
+
+ v, err = client.HGet(ctx, "hash", "key2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("hello2"))
+
+ keys, err := client.HKeys(ctx, "hash").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(ConsistOf([]string{"key1", "key2"}))
+ })
+
+ It("should HSet", func() {
+ hSet := client.HSet(ctx, "hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(1)))
+
+ hGet := client.HGet(ctx, "hash", "key")
+ Expect(hGet.Err()).NotTo(HaveOccurred())
+ Expect(hGet.Val()).To(Equal("hello"))
+ })
+
+ It("should HSetNX", func() {
+ hSetNX := client.HSetNX(ctx, "hash", "key", "hello")
+ Expect(hSetNX.Err()).NotTo(HaveOccurred())
+ Expect(hSetNX.Val()).To(Equal(true))
+
+ hSetNX = client.HSetNX(ctx, "hash", "key", "hello")
+ Expect(hSetNX.Err()).NotTo(HaveOccurred())
+ Expect(hSetNX.Val()).To(Equal(false))
+
+ hGet := client.HGet(ctx, "hash", "key")
+ Expect(hGet.Err()).NotTo(HaveOccurred())
+ Expect(hGet.Val()).To(Equal("hello"))
+ })
+
+ It("should HVals", func() {
+ err := client.HSet(ctx, "hash", "key1", "hello1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.HSet(ctx, "hash", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.HVals(ctx, "hash").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]string{"hello1", "hello2"}))
+
+ var slice []string
+ err = client.HVals(ctx, "hash").ScanSlice(&slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Equal([]string{"hello1", "hello2"}))
+ })
+
+ It("should HRandField", func() {
+ err := client.HSet(ctx, "hash", "key1", "hello1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.HSet(ctx, "hash", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
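+		// HRANDFIELD with count 1 returns one random field; with
+		// withValues=true the reply alternates field and value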
+ v := client.HRandField(ctx, "hash", 1, false)
+ Expect(v.Err()).NotTo(HaveOccurred())
+ Expect(v.Val()).To(Or(Equal([]string{"key1"}), Equal([]string{"key2"})))
+
+ v = client.HRandField(ctx, "hash", 0, false)
+ Expect(v.Err()).NotTo(HaveOccurred())
+ Expect(v.Val()).To(HaveLen(0))
+
+ var slice []string
+ err = client.HRandField(ctx, "hash", 1, true).ScanSlice(&slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Or(Equal([]string{"key1", "hello1"}), Equal([]string{"key2", "hello2"})))
+ })
+ })
+
+ Describe("hyperloglog", func() {
+ It("should PFMerge", func() {
+ pfAdd := client.PFAdd(ctx, "hll1", "1", "2", "3", "4", "5")
+ Expect(pfAdd.Err()).NotTo(HaveOccurred())
+
+ pfCount := client.PFCount(ctx, "hll1")
+ Expect(pfCount.Err()).NotTo(HaveOccurred())
+ Expect(pfCount.Val()).To(Equal(int64(5)))
+
+ pfAdd = client.PFAdd(ctx, "hll2", "a", "b", "c", "d", "e")
+ Expect(pfAdd.Err()).NotTo(HaveOccurred())
+
+ pfMerge := client.PFMerge(ctx, "hllMerged", "hll1", "hll2")
+ Expect(pfMerge.Err()).NotTo(HaveOccurred())
+
+ pfCount = client.PFCount(ctx, "hllMerged")
+ Expect(pfCount.Err()).NotTo(HaveOccurred())
+ Expect(pfCount.Val()).To(Equal(int64(10)))
+
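+			// PFCOUNT over several keys returns the approximate cardinality
+			// of their union without storing a merged HyperLogLog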
+ pfCount = client.PFCount(ctx, "hll1", "hll2")
+ Expect(pfCount.Err()).NotTo(HaveOccurred())
+ Expect(pfCount.Val()).To(Equal(int64(10)))
+ })
+ })
+
+ Describe("lists", func() {
+ It("should BLPop", func() {
+ rPush := client.RPush(ctx, "list1", "a", "b", "c")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ bLPop := client.BLPop(ctx, 0, "list1", "list2")
+ Expect(bLPop.Err()).NotTo(HaveOccurred())
+ Expect(bLPop.Val()).To(Equal([]string{"list1", "a"}))
+ })
+
+ It("should BLPopBlocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ bLPop := client.BLPop(ctx, 0, "list")
+ Expect(bLPop.Err()).NotTo(HaveOccurred())
+ Expect(bLPop.Val()).To(Equal([]string{"list", "a"}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BLPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ rPush := client.RPush(ctx, "list", "a")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BLPop is still blocked")
+ }
+ })
+
+ It("should BLPop timeout", func() {
+ val, err := client.BLPop(ctx, time.Second, "list1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
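+			// PoolStats: Hits counts commands served by an idle pooled
+			// connection, Misses counts the times none was available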
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("should BRPop", func() {
+ rPush := client.RPush(ctx, "list1", "a", "b", "c")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ bRPop := client.BRPop(ctx, 0, "list1", "list2")
+ Expect(bRPop.Err()).NotTo(HaveOccurred())
+ Expect(bRPop.Val()).To(Equal([]string{"list1", "c"}))
+ })
+
+ It("should BRPop blocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ brpop := client.BRPop(ctx, 0, "list")
+ Expect(brpop.Err()).NotTo(HaveOccurred())
+ Expect(brpop.Val()).To(Equal([]string{"list", "a"}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BRPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ rPush := client.RPush(ctx, "list", "a")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BRPop is still blocked")
+ // ok
+ }
+ })
+
+ It("should BRPopLPush", func() {
+ _, err := client.BRPopLPush(ctx, "list1", "list2", time.Second).Result()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.RPush(ctx, "list1", "a", "b", "c").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.BRPopLPush(ctx, "list1", "list2", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("c"))
+ })
+
+ It("should LIndex", func() {
+ lPush := client.LPush(ctx, "list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ lPush = client.LPush(ctx, "list", "Hello")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lIndex := client.LIndex(ctx, "list", 0)
+ Expect(lIndex.Err()).NotTo(HaveOccurred())
+ Expect(lIndex.Val()).To(Equal("Hello"))
+
+ lIndex = client.LIndex(ctx, "list", -1)
+ Expect(lIndex.Err()).NotTo(HaveOccurred())
+ Expect(lIndex.Val()).To(Equal("World"))
+
+ lIndex = client.LIndex(ctx, "list", 3)
+ Expect(lIndex.Err()).To(Equal(redis.Nil))
+ Expect(lIndex.Val()).To(Equal(""))
+ })
+
+ It("should LInsert", func() {
+ rPush := client.RPush(ctx, "list", "Hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "World")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lInsert := client.LInsert(ctx, "list", "BEFORE", "World", "There")
+ Expect(lInsert.Err()).NotTo(HaveOccurred())
+ Expect(lInsert.Val()).To(Equal(int64(3)))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "There", "World"}))
+ })
+
+ It("should LLen", func() {
+ lPush := client.LPush(ctx, "list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ lPush = client.LPush(ctx, "list", "Hello")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lLen := client.LLen(ctx, "list")
+ Expect(lLen.Err()).NotTo(HaveOccurred())
+ Expect(lLen.Val()).To(Equal(int64(2)))
+ })
+
+ It("should LPop", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lPop := client.LPop(ctx, "list")
+ Expect(lPop.Err()).NotTo(HaveOccurred())
+ Expect(lPop.Val()).To(Equal("one"))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should LPopCount", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "four")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lPopCount := client.LPopCount(ctx, "list", 2)
+ Expect(lPopCount.Err()).NotTo(HaveOccurred())
+ Expect(lPopCount.Val()).To(Equal([]string{"one", "two"}))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"three", "four"}))
+ })
+
+ It("should LPos", func() {
+ rPush := client.RPush(ctx, "list", "a")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "b")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "c")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "b")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lPos := client.LPos(ctx, "list", "b", redis.LPosArgs{})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal(int64(1)))
+
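+			// Rank selects which match to report (negative ranks scan from
+			// the tail) and MaxLen caps how many elements are inspected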
+ lPos = client.LPos(ctx, "list", "b", redis.LPosArgs{Rank: 2})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal(int64(3)))
+
+ lPos = client.LPos(ctx, "list", "b", redis.LPosArgs{Rank: -2})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal(int64(1)))
+
+ lPos = client.LPos(ctx, "list", "b", redis.LPosArgs{Rank: 2, MaxLen: 1})
+ Expect(lPos.Err()).To(Equal(redis.Nil))
+
+ lPos = client.LPos(ctx, "list", "z", redis.LPosArgs{})
+ Expect(lPos.Err()).To(Equal(redis.Nil))
+ })
+
+ It("should LPosCount", func() {
+ rPush := client.RPush(ctx, "list", "a")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "b")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "c")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "b")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lPos := client.LPosCount(ctx, "list", "b", 2, redis.LPosArgs{})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal([]int64{1, 3}))
+
+ lPos = client.LPosCount(ctx, "list", "b", 2, redis.LPosArgs{Rank: 2})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal([]int64{3}))
+
+ lPos = client.LPosCount(ctx, "list", "b", 1, redis.LPosArgs{Rank: 1, MaxLen: 1})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal([]int64{}))
+
+ lPos = client.LPosCount(ctx, "list", "b", 1, redis.LPosArgs{Rank: 1, MaxLen: 0})
+ Expect(lPos.Err()).NotTo(HaveOccurred())
+ Expect(lPos.Val()).To(Equal([]int64{1}))
+ })
+
+ It("should LPush", func() {
+ lPush := client.LPush(ctx, "list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ lPush = client.LPush(ctx, "list", "Hello")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+ })
+
+ It("should LPushX", func() {
+ lPush := client.LPush(ctx, "list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
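+			// LPUSHX only pushes when the list already exists and returns
+			// the new length, or 0 when the key is missing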
+ lPushX := client.LPushX(ctx, "list", "Hello")
+ Expect(lPushX.Err()).NotTo(HaveOccurred())
+ Expect(lPushX.Val()).To(Equal(int64(2)))
+
+ lPush = client.LPush(ctx, "list1", "three")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ Expect(lPush.Val()).To(Equal(int64(1)))
+
+ lPushX = client.LPushX(ctx, "list1", "two", "one")
+ Expect(lPushX.Err()).NotTo(HaveOccurred())
+ Expect(lPushX.Val()).To(Equal(int64(3)))
+
+ lPushX = client.LPushX(ctx, "list2", "Hello")
+ Expect(lPushX.Err()).NotTo(HaveOccurred())
+ Expect(lPushX.Val()).To(Equal(int64(0)))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+
+ lRange = client.LRange(ctx, "list1", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ lRange = client.LRange(ctx, "list2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{}))
+ })
+
+ It("should LRange", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lRange := client.LRange(ctx, "list", 0, 0)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one"}))
+
+ lRange = client.LRange(ctx, "list", -3, 2)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ lRange = client.LRange(ctx, "list", -100, 100)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ lRange = client.LRange(ctx, "list", 5, 10)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{}))
+ })
+
+ It("should LRem", func() {
+ rPush := client.RPush(ctx, "list", "hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "key")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lRem := client.LRem(ctx, "list", -2, "hello")
+ Expect(lRem.Err()).NotTo(HaveOccurred())
+ Expect(lRem.Val()).To(Equal(int64(2)))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"hello", "key"}))
+ })
+
+ It("should LSet", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lSet := client.LSet(ctx, "list", 0, "four")
+ Expect(lSet.Err()).NotTo(HaveOccurred())
+ Expect(lSet.Val()).To(Equal("OK"))
+
+ lSet = client.LSet(ctx, "list", -2, "five")
+ Expect(lSet.Err()).NotTo(HaveOccurred())
+ Expect(lSet.Val()).To(Equal("OK"))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"four", "five", "three"}))
+ })
+
+ It("should LTrim", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lTrim := client.LTrim(ctx, "list", 1, -1)
+ Expect(lTrim.Err()).NotTo(HaveOccurred())
+ Expect(lTrim.Val()).To(Equal("OK"))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should RPop", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ rPop := client.RPop(ctx, "list")
+ Expect(rPop.Err()).NotTo(HaveOccurred())
+ Expect(rPop.Val()).To(Equal("three"))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+ })
+
+ It("should RPopCount", func() {
+ rPush := client.RPush(ctx, "list", "one", "two", "three", "four")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(4)))
+
+ rPopCount := client.RPopCount(ctx, "list", 2)
+ Expect(rPopCount.Err()).NotTo(HaveOccurred())
+ Expect(rPopCount.Val()).To(Equal([]string{"four", "three"}))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+ })
+
+ It("should RPopLPush", func() {
+ rPush := client.RPush(ctx, "list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush(ctx, "list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ rPopLPush := client.RPopLPush(ctx, "list", "list2")
+ Expect(rPopLPush.Err()).NotTo(HaveOccurred())
+ Expect(rPopLPush.Val()).To(Equal("three"))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+
+ lRange = client.LRange(ctx, "list2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"three"}))
+ })
+
+ It("should RPush", func() {
+ rPush := client.RPush(ctx, "list", "Hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPush = client.RPush(ctx, "list", "World")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(2)))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+ })
+
+ It("should RPushX", func() {
+ rPush := client.RPush(ctx, "list", "Hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPushX := client.RPushX(ctx, "list", "World")
+ Expect(rPushX.Err()).NotTo(HaveOccurred())
+ Expect(rPushX.Val()).To(Equal(int64(2)))
+
+ rPush = client.RPush(ctx, "list1", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPushX = client.RPushX(ctx, "list1", "two", "three")
+ Expect(rPushX.Err()).NotTo(HaveOccurred())
+ Expect(rPushX.Val()).To(Equal(int64(3)))
+
+ rPushX = client.RPushX(ctx, "list2", "World")
+ Expect(rPushX.Err()).NotTo(HaveOccurred())
+ Expect(rPushX.Val()).To(Equal(int64(0)))
+
+ lRange := client.LRange(ctx, "list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+
+ lRange = client.LRange(ctx, "list1", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ lRange = client.LRange(ctx, "list2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{}))
+ })
+
+ It("should LMove", func() {
+ rPush := client.RPush(ctx, "lmove1", "ichi")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPush = client.RPush(ctx, "lmove1", "ni")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(2)))
+
+ rPush = client.RPush(ctx, "lmove1", "san")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(3)))
+
+ lMove := client.LMove(ctx, "lmove1", "lmove2", "RIGHT", "LEFT")
+ Expect(lMove.Err()).NotTo(HaveOccurred())
+ Expect(lMove.Val()).To(Equal("san"))
+
+ lRange := client.LRange(ctx, "lmove2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"san"}))
+ })
+
+ It("should BLMove", func() {
+ rPush := client.RPush(ctx, "blmove1", "ichi")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPush = client.RPush(ctx, "blmove1", "ni")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(2)))
+
+ rPush = client.RPush(ctx, "blmove1", "san")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(3)))
+
+ blMove := client.BLMove(ctx, "blmove1", "blmove2", "RIGHT", "LEFT", time.Second)
+ Expect(blMove.Err()).NotTo(HaveOccurred())
+ Expect(blMove.Val()).To(Equal("san"))
+
+ lRange := client.LRange(ctx, "blmove2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"san"}))
+ })
+ })
+
+ Describe("sets", func() {
+ It("should SAdd", func() {
+ sAdd := client.SAdd(ctx, "set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sAdd = client.SAdd(ctx, "set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sAdd = client.SAdd(ctx, "set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(0)))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+ })
+
+ It("should SAdd strings", func() {
+ set := []string{"Hello", "World", "World"}
+ sAdd := client.SAdd(ctx, "set", set)
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(2)))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+ })
+
+ It("should SCard", func() {
+ sAdd := client.SAdd(ctx, "set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sAdd = client.SAdd(ctx, "set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sCard := client.SCard(ctx, "set")
+ Expect(sCard.Err()).NotTo(HaveOccurred())
+ Expect(sCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should SDiff", func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sDiff := client.SDiff(ctx, "set1", "set2")
+ Expect(sDiff.Err()).NotTo(HaveOccurred())
+ Expect(sDiff.Val()).To(ConsistOf([]string{"a", "b"}))
+ })
+
+ It("should SDiffStore", func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sDiffStore := client.SDiffStore(ctx, "set", "set1", "set2")
+ Expect(sDiffStore.Err()).NotTo(HaveOccurred())
+ Expect(sDiffStore.Val()).To(Equal(int64(2)))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"}))
+ })
+
+ It("should SInter", func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sInter := client.SInter(ctx, "set1", "set2")
+ Expect(sInter.Err()).NotTo(HaveOccurred())
+ Expect(sInter.Val()).To(Equal([]string{"c"}))
+ })
+
+ It("should SInterStore", func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sInterStore := client.SInterStore(ctx, "set", "set1", "set2")
+ Expect(sInterStore.Err()).NotTo(HaveOccurred())
+ Expect(sInterStore.Val()).To(Equal(int64(1)))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(Equal([]string{"c"}))
+ })
+
+ It("should IsMember", func() {
+ sAdd := client.SAdd(ctx, "set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sIsMember := client.SIsMember(ctx, "set", "one")
+ Expect(sIsMember.Err()).NotTo(HaveOccurred())
+ Expect(sIsMember.Val()).To(Equal(true))
+
+ sIsMember = client.SIsMember(ctx, "set", "two")
+ Expect(sIsMember.Err()).NotTo(HaveOccurred())
+ Expect(sIsMember.Val()).To(Equal(false))
+ })
+
+ It("should SMIsMember", func() {
+ sAdd := client.SAdd(ctx, "set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMIsMember := client.SMIsMember(ctx, "set", "one", "two")
+ Expect(sMIsMember.Err()).NotTo(HaveOccurred())
+ Expect(sMIsMember.Val()).To(Equal([]bool{true, false}))
+ })
+
+ It("should SMembers", func() {
+ sAdd := client.SAdd(ctx, "set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+ })
+
+ It("should SMembersMap", func() {
+ sAdd := client.SAdd(ctx, "set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMembersMap := client.SMembersMap(ctx, "set")
+ Expect(sMembersMap.Err()).NotTo(HaveOccurred())
+ Expect(sMembersMap.Val()).To(Equal(map[string]struct{}{"Hello": {}, "World": {}}))
+ })
+
+ It("should SMove", func() {
+ sAdd := client.SAdd(ctx, "set1", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMove := client.SMove(ctx, "set1", "set2", "two")
+ Expect(sMove.Err()).NotTo(HaveOccurred())
+ Expect(sMove.Val()).To(Equal(true))
+
+ sMembers := client.SMembers(ctx, "set1")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(Equal([]string{"one"}))
+
+ sMembers = client.SMembers(ctx, "set2")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
+ })
+
+ It("should SPop", func() {
+ sAdd := client.SAdd(ctx, "set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sPop := client.SPop(ctx, "set")
+ Expect(sPop.Err()).NotTo(HaveOccurred())
+ Expect(sPop.Val()).NotTo(Equal(""))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(2))
+ })
+
+ It("should SPopN", func() {
+ sAdd := client.SAdd(ctx, "set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "four")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sPopN := client.SPopN(ctx, "set", 1)
+ Expect(sPopN.Err()).NotTo(HaveOccurred())
+ Expect(sPopN.Val()).NotTo(Equal([]string{""}))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(3))
+
+ sPopN = client.SPopN(ctx, "set", 4)
+ Expect(sPopN.Err()).NotTo(HaveOccurred())
+ Expect(sPopN.Val()).To(HaveLen(3))
+
+ sMembers = client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(0))
+ })
+
+ It("should SRandMember and SRandMemberN", func() {
+ err := client.SAdd(ctx, "set", "one").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.SAdd(ctx, "set", "two").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.SAdd(ctx, "set", "three").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ members, err := client.SMembers(ctx, "set").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(HaveLen(3))
+
+ member, err := client.SRandMember(ctx, "set").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(member).NotTo(Equal(""))
+
+ members, err = client.SRandMemberN(ctx, "set", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(HaveLen(2))
+ })
+
+ It("should SRem", func() {
+ sAdd := client.SAdd(ctx, "set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sRem := client.SRem(ctx, "set", "one")
+ Expect(sRem.Err()).NotTo(HaveOccurred())
+ Expect(sRem.Val()).To(Equal(int64(1)))
+
+ sRem = client.SRem(ctx, "set", "four")
+ Expect(sRem.Err()).NotTo(HaveOccurred())
+ Expect(sRem.Val()).To(Equal(int64(0)))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
+ })
+
+ It("should SUnion", func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sUnion := client.SUnion(ctx, "set1", "set2")
+ Expect(sUnion.Err()).NotTo(HaveOccurred())
+ Expect(sUnion.Val()).To(HaveLen(5))
+ })
+
+ It("should SUnionStore", func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sUnionStore := client.SUnionStore(ctx, "set", "set1", "set2")
+ Expect(sUnionStore.Err()).NotTo(HaveOccurred())
+ Expect(sUnionStore.Val()).To(Equal(int64(5)))
+
+ sMembers := client.SMembers(ctx, "set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(5))
+ })
+ })
+
+ Describe("sorted sets", func() {
+ It("should BZPopMax", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ member, err := client.BZPopMax(ctx, 0, "zset1", "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(member).To(Equal(&redis.ZWithKey{
+ Z: redis.Z{
+ Score: 3,
+ Member: "three",
+ },
+ Key: "zset1",
+ }))
+ })
+
+ It("should BZPopMax blocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ bZPopMax := client.BZPopMax(ctx, 0, "zset")
+ Expect(bZPopMax.Err()).NotTo(HaveOccurred())
+ Expect(bZPopMax.Val()).To(Equal(&redis.ZWithKey{
+ Z: redis.Z{
+ Member: "a",
+ Score: 1,
+ },
+ Key: "zset",
+ }))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BZPopMax is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ zAdd := client.ZAdd(ctx, "zset", &redis.Z{
+ Member: "a",
+ Score: 1,
+ })
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BZPopMax is still blocked")
+ }
+ })
+
+ It("should BZPopMax timeout", func() {
+ val, err := client.BZPopMax(ctx, time.Second, "zset1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("should BZPopMin", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ member, err := client.BZPopMin(ctx, 0, "zset1", "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(member).To(Equal(&redis.ZWithKey{
+ Z: redis.Z{
+ Score: 1,
+ Member: "one",
+ },
+ Key: "zset1",
+ }))
+ })
+
+ It("should BZPopMin blocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ bZPopMin := client.BZPopMin(ctx, 0, "zset")
+ Expect(bZPopMin.Err()).NotTo(HaveOccurred())
+ Expect(bZPopMin.Val()).To(Equal(&redis.ZWithKey{
+ Z: redis.Z{
+ Member: "a",
+ Score: 1,
+ },
+ Key: "zset",
+ }))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BZPopMin is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ zAdd := client.ZAdd(ctx, "zset", &redis.Z{
+ Member: "a",
+ Score: 1,
+ })
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BZPopMin is still blocked")
+ }
+ })
+
+ It("should BZPopMin timeout", func() {
+ val, err := client.BZPopMin(ctx, time.Second, "zset1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("should ZAdd", func() {
+ added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "uno",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: "two",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 1,
+ Member: "uno",
+ }, {
+ Score: 3,
+ Member: "two",
+ }}))
+ })
+
+ It("should ZAdd bytes", func() {
+ added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: []byte("one"),
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: []byte("uno"),
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: []byte("two"),
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: []byte("two"),
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 1,
+ Member: "uno",
+ }, {
+ Score: 3,
+ Member: "two",
+ }}))
+ })
+
+ It("should ZAddArgs", func() {
+ // Test only the GT+LT options.
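+			// GT only updates an existing member when the new score is greater,
+			// LT only when it is lower; new members are always added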
+ added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ GT: true,
+ Members: []redis.Z{{Score: 1, Member: "one"}},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+ added, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ GT: true,
+ Members: []redis.Z{{Score: 2, Member: "one"}},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ LT: true,
+ Members: []redis.Z{{Score: 1, Member: "one"}},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+ })
+
+ It("should ZAddNX", func() {
+ added, err := client.ZAddNX(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+ added, err = client.ZAddNX(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+ })
+
+ It("should ZAddXX", func() {
+ added, err := client.ZAddXX(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(BeEmpty())
+
+ added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAddXX(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+ })
+
+ // TODO: remove in v9.
+ It("should ZAddCh", func() {
+ changed, err := client.ZAddCh(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(1)))
+
+ changed, err = client.ZAddCh(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(0)))
+ })
+
+ // TODO: remove in v9.
+ It("should ZAddNXCh", func() {
+ changed, err := client.ZAddNXCh(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+ changed, err = client.ZAddNXCh(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+ })
+
+ // TODO: remove in v9.
+ It("should ZAddXXCh", func() {
+ changed, err := client.ZAddXXCh(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(BeEmpty())
+
+ added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ changed, err = client.ZAddXXCh(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(1)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+ })
+
+ // TODO: remove in v9.
+ It("should ZIncr", func() {
+ score, err := client.ZIncr(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+ score, err = client.ZIncr(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(2)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+ })
+
+ // TODO: remove in v9.
+ It("should ZIncrNX", func() {
+ score, err := client.ZIncrNX(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+ score, err = client.ZIncrNX(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(score).To(Equal(float64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+ })
+
+ // TODO: remove in v9.
+ It("should ZIncrXX", func() {
+ score, err := client.ZIncrXX(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(score).To(Equal(float64(0)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(BeEmpty())
+
+ added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ score, err = client.ZIncrXX(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(2)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+ })
+
+ It("should ZCard", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ card, err := client.ZCard(ctx, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(card).To(Equal(int64(2)))
+ })
+
+ It("should ZCount", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ count, err := client.ZCount(ctx, "zset", "-inf", "+inf").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(count).To(Equal(int64(3)))
+
+ count, err = client.ZCount(ctx, "zset", "(1", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(count).To(Equal(int64(2)))
+
+ count, err = client.ZLexCount(ctx, "zset", "-", "+").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(count).To(Equal(int64(3)))
+ })
+
+ It("should ZIncrBy", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.ZIncrBy(ctx, "zset", 2, "one").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(float64(3)))
+
+ val, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 3,
+ Member: "one",
+ }}))
+ })
+
+ It("should ZInterStore", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset3", &redis.Z{Score: 3, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+
+ vals, err := client.ZRangeWithScores(ctx, "out", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 5,
+ Member: "one",
+ }, {
+ Score: 10,
+ Member: "two",
+ }}))
+ })
+
+ It("should ZMScore", func() {
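+			// Even before the key exists, ZMSCORE returns one score per requested
+			// member, reporting missing members as 0.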
+ zmScore := client.ZMScore(ctx, "zset", "one", "three")
+ Expect(zmScore.Err()).NotTo(HaveOccurred())
+ Expect(zmScore.Val()).To(HaveLen(2))
+ Expect(zmScore.Val()[0]).To(Equal(float64(0)))
+
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zmScore = client.ZMScore(ctx, "zset", "one", "three")
+ Expect(zmScore.Err()).NotTo(HaveOccurred())
+ Expect(zmScore.Val()).To(HaveLen(2))
+ Expect(zmScore.Val()[0]).To(Equal(float64(1)))
+
+ zmScore = client.ZMScore(ctx, "zset", "four")
+ Expect(zmScore.Err()).NotTo(HaveOccurred())
+ Expect(zmScore.Val()).To(HaveLen(1))
+
+ zmScore = client.ZMScore(ctx, "zset", "four", "one")
+ Expect(zmScore.Err()).NotTo(HaveOccurred())
+ Expect(zmScore.Val()).To(HaveLen(2))
+ })
+
+ It("should ZPopMax", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ members, err := client.ZPopMax(ctx, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }}))
+
+ // adding back 3
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ members, err = client.ZPopMax(ctx, "zset", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }}))
+
+ // adding back 2 & 3
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ members, err = client.ZPopMax(ctx, "zset", 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+ })
+
+ It("should ZPopMin", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 3,
+ Member: "three",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ members, err := client.ZPopMin(ctx, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ // adding back 1
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ members, err = client.ZPopMin(ctx, "zset", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 2,
+ Member: "two",
+ }}))
+
+ // adding back 1 & 2
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 2,
+ Member: "two",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ members, err = client.ZPopMin(ctx, "zset", 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 3,
+ Member: "three",
+ }}))
+ })
+
+ It("should ZRange", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRange := client.ZRange(ctx, "zset", 0, -1)
+ Expect(zRange.Err()).NotTo(HaveOccurred())
+ Expect(zRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ zRange = client.ZRange(ctx, "zset", 2, 3)
+ Expect(zRange.Err()).NotTo(HaveOccurred())
+ Expect(zRange.Val()).To(Equal([]string{"three"}))
+
+ zRange = client.ZRange(ctx, "zset", -2, -1)
+ Expect(zRange.Err()).NotTo(HaveOccurred())
+ Expect(zRange.Val()).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should ZRangeWithScores", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 3,
+ Member: "three",
+ }}))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "three"}}))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", -2, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 3,
+ Member: "three",
+ }}))
+ })
+
+ It("should ZRangeArgs", func() {
+ added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ {Score: 2, Member: "two"},
+ {Score: 3, Member: "three"},
+ {Score: 4, Member: "four"},
+ },
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(4)))
+
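+			// Start and Stop are given as the lower and upper bound even for Rev
+			// queries: scores 1..4 in reverse order, skipping the highest entry
+			// (Offset 1) and taking two, yields "three" and "two".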
+ zRange, err := client.ZRangeArgs(ctx, redis.ZRangeArgs{
+ Key: "zset",
+ Start: 1,
+ Stop: 4,
+ ByScore: true,
+ Rev: true,
+ Offset: 1,
+ Count: 2,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(zRange).To(Equal([]string{"three", "two"}))
+
+ zRange, err = client.ZRangeArgs(ctx, redis.ZRangeArgs{
+ Key: "zset",
+ Start: "-",
+ Stop: "+",
+ ByLex: true,
+ Rev: true,
+ Offset: 2,
+ Count: 2,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(zRange).To(Equal([]string{"two", "one"}))
+
+ zRange, err = client.ZRangeArgs(ctx, redis.ZRangeArgs{
+ Key: "zset",
+ Start: "(1",
+ Stop: "(4",
+ ByScore: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(zRange).To(Equal([]string{"two", "three"}))
+
+ // withScores.
+ zSlice, err := client.ZRangeArgsWithScores(ctx, redis.ZRangeArgs{
+ Key: "zset",
+ Start: 1,
+ Stop: 4,
+ ByScore: true,
+ Rev: true,
+ Offset: 1,
+ Count: 2,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(zSlice).To(Equal([]redis.Z{
+ {Score: 3, Member: "three"},
+ {Score: 2, Member: "two"},
+ }))
+ })
+
+ It("should ZRangeByScore", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRangeByScore := client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ zRangeByScore = client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{
+ Min: "1",
+ Max: "2",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two"}))
+
+ zRangeByScore = client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{
+ Min: "(1",
+ Max: "2",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{"two"}))
+
+ zRangeByScore = client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{
+ Min: "(1",
+ Max: "(2",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{}))
+ })
+
+ It("should ZRangeByLex", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 0,
+ Member: "a",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 0,
+ Member: "b",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{
+ Score: 0,
+ Member: "c",
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRangeByLex := client.ZRangeByLex(ctx, "zset", &redis.ZRangeBy{
+ Min: "-",
+ Max: "+",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b", "c"}))
+
+ zRangeByLex = client.ZRangeByLex(ctx, "zset", &redis.ZRangeBy{
+ Min: "[a",
+ Max: "[b",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b"}))
+
+ zRangeByLex = client.ZRangeByLex(ctx, "zset", &redis.ZRangeBy{
+ Min: "(a",
+ Max: "[b",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{"b"}))
+
+ zRangeByLex = client.ZRangeByLex(ctx, "zset", &redis.ZRangeBy{
+ Min: "(a",
+ Max: "(b",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{}))
+ })
+
+ It("should ZRangeByScoreWithScoresMap", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 3,
+ Member: "three",
+ }}))
+
+ vals, err = client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "1",
+ Max: "2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 2,
+ Member: "two",
+ }}))
+
+ vals, err = client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "(1",
+ Max: "2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "two"}}))
+
+ vals, err = client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "(1",
+ Max: "(2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{}))
+ })
+
+ It("should ZRangeStore", func() {
+ added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ {Score: 2, Member: "two"},
+ {Score: 3, Member: "three"},
+ {Score: 4, Member: "four"},
+ },
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(4)))
+
+ rangeStore, err := client.ZRangeStore(ctx, "new-zset", redis.ZRangeArgs{
+ Key: "zset",
+ Start: 1,
+ Stop: 4,
+ ByScore: true,
+ Rev: true,
+ Offset: 1,
+ Count: 2,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(rangeStore).To(Equal(int64(2)))
+
+ zRange, err := client.ZRange(ctx, "new-zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(zRange).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should ZRank", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRank := client.ZRank(ctx, "zset", "three")
+ Expect(zRank.Err()).NotTo(HaveOccurred())
+ Expect(zRank.Val()).To(Equal(int64(2)))
+
+ zRank = client.ZRank(ctx, "zset", "four")
+ Expect(zRank.Err()).To(Equal(redis.Nil))
+ Expect(zRank.Val()).To(Equal(int64(0)))
+ })
+
+ It("should ZRem", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRem := client.ZRem(ctx, "zset", "two")
+ Expect(zRem.Err()).NotTo(HaveOccurred())
+ Expect(zRem.Val()).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }, {
+ Score: 3,
+ Member: "three",
+ }}))
+ })
+
+ It("should ZRemRangeByRank", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRemRangeByRank := client.ZRemRangeByRank(ctx, "zset", 0, 1)
+ Expect(zRemRangeByRank.Err()).NotTo(HaveOccurred())
+ Expect(zRemRangeByRank.Val()).To(Equal(int64(2)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }}))
+ })
+
+ It("should ZRemRangeByScore", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRemRangeByScore := client.ZRemRangeByScore(ctx, "zset", "-inf", "(2")
+ Expect(zRemRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRemRangeByScore.Val()).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 3,
+ Member: "three",
+ }}))
+ })
+
+ It("should ZRemRangeByLex", func() {
+ zz := []*redis.Z{
+ {Score: 0, Member: "aaaa"},
+ {Score: 0, Member: "b"},
+ {Score: 0, Member: "c"},
+ {Score: 0, Member: "d"},
+ {Score: 0, Member: "e"},
+ {Score: 0, Member: "foo"},
+ {Score: 0, Member: "zap"},
+ {Score: 0, Member: "zip"},
+ {Score: 0, Member: "ALPHA"},
+ {Score: 0, Member: "alpha"},
+ }
+ for _, z := range zz {
+ err := client.ZAdd(ctx, "zset", z).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ n, err := client.ZRemRangeByLex(ctx, "zset", "[alpha", "[omega").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(6)))
+
+ vals, err := client.ZRange(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"ALPHA", "aaaa", "zap", "zip"}))
+ })
+
+ It("should ZRevRange", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRevRange := client.ZRevRange(ctx, "zset", 0, -1)
+ Expect(zRevRange.Err()).NotTo(HaveOccurred())
+ Expect(zRevRange.Val()).To(Equal([]string{"three", "two", "one"}))
+
+ zRevRange = client.ZRevRange(ctx, "zset", 2, 3)
+ Expect(zRevRange.Err()).NotTo(HaveOccurred())
+ Expect(zRevRange.Val()).To(Equal([]string{"one"}))
+
+ zRevRange = client.ZRevRange(ctx, "zset", -2, -1)
+ Expect(zRevRange.Err()).NotTo(HaveOccurred())
+ Expect(zRevRange.Val()).To(Equal([]string{"two", "one"}))
+ })
+
+ It("should ZRevRangeWithScoresMap", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.ZRevRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+
+ val, err = client.ZRevRangeWithScores(ctx, "zset", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+
+ val, err = client.ZRevRangeWithScores(ctx, "zset", -2, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+ })
+
+ It("should ZRevRangeByScore", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByScore(
+ ctx, "zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"three", "two", "one"}))
+
+ vals, err = client.ZRevRangeByScore(
+ ctx, "zset", &redis.ZRangeBy{Max: "2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"two"}))
+
+ vals, err = client.ZRevRangeByScore(
+ ctx, "zset", &redis.ZRangeBy{Max: "(2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{}))
+ })
+
+ It("should ZRevRangeByLex", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "a"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "b"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "c"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByLex(
+ ctx, "zset", &redis.ZRangeBy{Max: "+", Min: "-"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"c", "b", "a"}))
+
+ vals, err = client.ZRevRangeByLex(
+ ctx, "zset", &redis.ZRangeBy{Max: "[b", Min: "(a"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"b"}))
+
+ vals, err = client.ZRevRangeByLex(
+ ctx, "zset", &redis.ZRangeBy{Max: "(b", Min: "(a"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{}))
+ })
+
+ It("should ZRevRangeByScoreWithScores", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByScoreWithScores(
+ ctx, "zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+ })
+
+ It("should ZRevRangeByScoreWithScoresMap", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByScoreWithScores(
+ ctx, "zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+
+ vals, err = client.ZRevRangeByScoreWithScores(
+ ctx, "zset", &redis.ZRangeBy{Max: "2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "two"}}))
+
+ vals, err = client.ZRevRangeByScoreWithScores(
+ ctx, "zset", &redis.ZRangeBy{Max: "(2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{}))
+ })
+
+ It("should ZRevRank", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRevRank := client.ZRevRank(ctx, "zset", "one")
+ Expect(zRevRank.Err()).NotTo(HaveOccurred())
+ Expect(zRevRank.Val()).To(Equal(int64(2)))
+
+ zRevRank = client.ZRevRank(ctx, "zset", "four")
+ Expect(zRevRank.Err()).To(Equal(redis.Nil))
+ Expect(zRevRank.Val()).To(Equal(int64(0)))
+ })
+
+ It("should ZScore", func() {
+ zAdd := client.ZAdd(ctx, "zset", &redis.Z{Score: 1.001, Member: "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zScore := client.ZScore(ctx, "zset", "one")
+ Expect(zScore.Err()).NotTo(HaveOccurred())
+ Expect(zScore.Val()).To(Equal(float64(1.001)))
+ })
+
+ It("should ZUnion", func() {
+ err := client.ZAddArgs(ctx, "zset1", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ {Score: 2, Member: "two"},
+ },
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAddArgs(ctx, "zset2", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ {Score: 2, Member: "two"},
+ {Score: 3, Member: "three"},
+ },
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ union, err := client.ZUnion(ctx, redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ Aggregate: "sum",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(union).To(Equal([]string{"one", "three", "two"}))
+
+ unionScores, err := client.ZUnionWithScores(ctx, redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ Aggregate: "sum",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(unionScores).To(Equal([]redis.Z{
+ {Score: 5, Member: "one"},
+ {Score: 9, Member: "three"},
+ {Score: 10, Member: "two"},
+ }))
+ })
+
+ It("should ZUnionStore", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.ZUnionStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+
+ val, err := client.ZRangeWithScores(ctx, "out", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{
+ Score: 5,
+ Member: "one",
+ }, {
+ Score: 9,
+ Member: "three",
+ }, {
+ Score: 10,
+ Member: "two",
+ }}))
+ })
+
+ It("should ZRandMember", func() {
+ err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v := client.ZRandMember(ctx, "zset", 1, false)
+ Expect(v.Err()).NotTo(HaveOccurred())
+ Expect(v.Val()).To(Or(Equal([]string{"one"}), Equal([]string{"two"})))
+
+ v = client.ZRandMember(ctx, "zset", 0, false)
+ Expect(v.Err()).NotTo(HaveOccurred())
+ Expect(v.Val()).To(HaveLen(0))
+
+ var slice []string
+ err = client.ZRandMember(ctx, "zset", 1, true).ScanSlice(&slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Or(Equal([]string{"one", "1"}), Equal([]string{"two", "2"})))
+ })
+
+ It("should ZDiff", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.ZDiff(ctx, "zset1", "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should ZDiffWithScores", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.ZDiffWithScores(ctx, "zset1", "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]redis.Z{
+ {
+ Member: "two",
+ Score: 2,
+ },
+ {
+ Member: "three",
+ Score: 3,
+ },
+ }))
+ })
+
+ It("should ZInter", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.ZInter(ctx, &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]string{"one", "two"}))
+ })
+
+ It("should ZInterWithScores", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.ZInterWithScores(ctx, &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ Aggregate: "Max",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]redis.Z{
+ {
+ Member: "one",
+ Score: 3,
+ },
+ {
+ Member: "two",
+ Score: 6,
+ },
+ }))
+ })
+
+ It("should ZDiffStore", func() {
+ err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ v, err := client.ZDiffStore(ctx, "out1", "zset1", "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal(int64(0)))
+ v, err = client.ZDiffStore(ctx, "out1", "zset2", "zset1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal(int64(1)))
+ vals, err := client.ZRangeWithScores(ctx, "out1", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }}))
+ })
+ })
+
+ Describe("streams", func() {
+ BeforeEach(func() {
+ id, err := client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "stream",
+ ID: "1-0",
+ Values: map[string]interface{}{"uno": "un"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(id).To(Equal("1-0"))
+
+ // Values supports []interface{}.
+ id, err = client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "stream",
+ ID: "2-0",
+ Values: []interface{}{"dos", "deux"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(id).To(Equal("2-0"))
+
+			// Values supports []string.
+ id, err = client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "stream",
+ ID: "3-0",
+ Values: []string{"tres", "troix"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(id).To(Equal("3-0"))
+ })
+
+		// TODO: remove in v9.
+ It("should XTrim", func() {
+ n, err := client.XTrim(ctx, "stream", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+		// TODO: remove in v9.
+ It("should XTrimApprox", func() {
+ n, err := client.XTrimApprox(ctx, "stream", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+		// TODO: the limit parameter of XTrimMaxLenApprox/XTrimMinIDApprox has a bug
+		// in Redis, so it is not tested for now.
+		// See https://github.com/redis/redis/issues/9046
+ It("should XTrimMaxLen", func() {
+ n, err := client.XTrimMaxLen(ctx, "stream", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XTrimMaxLenApprox", func() {
+ n, err := client.XTrimMaxLenApprox(ctx, "stream", 0, 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XTrimMinID", func() {
+ n, err := client.XTrimMinID(ctx, "stream", "4-0").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XTrimMinIDApprox", func() {
+ n, err := client.XTrimMinIDApprox(ctx, "stream", "4-0", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XAdd", func() {
+ id, err := client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "stream",
+ Values: map[string]interface{}{"quatro": "quatre"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.XRange(ctx, "stream", "-", "+").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ {ID: id, Values: map[string]interface{}{"quatro": "quatre"}},
+ }))
+ })
+
+		// TODO: the limit parameter of XAdd has a bug in Redis, so it is not tested
+		// for now.
+		// See https://github.com/redis/redis/issues/9046
+ It("should XAdd with MaxLen", func() {
+ id, err := client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "stream",
+ MaxLen: 1,
+ Values: map[string]interface{}{"quatro": "quatre"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.XRange(ctx, "stream", "-", "+").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.XMessage{
+ {ID: id, Values: map[string]interface{}{"quatro": "quatre"}},
+ }))
+ })
+
+ It("should XAdd with MinID", func() {
+ id, err := client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "stream",
+ MinID: "5-0",
+ ID: "4-0",
+ Values: map[string]interface{}{"quatro": "quatre"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(id).To(Equal("4-0"))
+
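+			// MinID 5-0 trims every entry with an ID below 5-0, including the entry
+			// just added, so the stream ends up empty.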
+ vals, err := client.XRange(ctx, "stream", "-", "+").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(0))
+ })
+
+ It("should XDel", func() {
+ n, err := client.XDel(ctx, "stream", "1-0", "2-0", "3-0").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XLen", func() {
+ n, err := client.XLen(ctx, "stream").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XRange", func() {
+ msgs, err := client.XRange(ctx, "stream", "-", "+").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ }))
+
+ msgs, err = client.XRange(ctx, "stream", "2", "+").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ }))
+
+ msgs, err = client.XRange(ctx, "stream", "-", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ }))
+ })
+
+ It("should XRangeN", func() {
+ msgs, err := client.XRangeN(ctx, "stream", "-", "+", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ }))
+
+ msgs, err = client.XRangeN(ctx, "stream", "2", "+", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ }))
+
+ msgs, err = client.XRangeN(ctx, "stream", "-", "2", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ }))
+ })
+
+ It("should XRevRange", func() {
+ msgs, err := client.XRevRange(ctx, "stream", "+", "-").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ }))
+
+ msgs, err = client.XRevRange(ctx, "stream", "+", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ }))
+ })
+
+ It("should XRevRangeN", func() {
+ msgs, err := client.XRevRangeN(ctx, "stream", "+", "-", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ }))
+
+ msgs, err = client.XRevRangeN(ctx, "stream", "+", "2", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ }))
+ })
+
+		It("should XReadStreams", func() {
+ res, err := client.XReadStreams(ctx, "stream", "0").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ },
+ },
+ }))
+
+ _, err = client.XReadStreams(ctx, "stream", "3").Result()
+ Expect(err).To(Equal(redis.Nil))
+ })
+
+ It("should XRead", func() {
+ res, err := client.XRead(ctx, &redis.XReadArgs{
+ Streams: []string{"stream", "0"},
+ Count: 2,
+ Block: 100 * time.Millisecond,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ },
+ },
+ }))
+
+ _, err = client.XRead(ctx, &redis.XReadArgs{
+ Streams: []string{"stream", "3"},
+ Count: 1,
+ Block: 100 * time.Millisecond,
+ }).Result()
+ Expect(err).To(Equal(redis.Nil))
+ })
+
+ Describe("group", func() {
+ BeforeEach(func() {
+ err := client.XGroupCreate(ctx, "stream", "group", "0").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ res, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
+ Group: "group",
+ Consumer: "consumer",
+ Streams: []string{"stream", ">"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ },
+ },
+ }))
+ })
+
+ AfterEach(func() {
+ n, err := client.XGroupDestroy(ctx, "stream", "group").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ })
+
+ It("should XReadGroup skip empty", func() {
+ n, err := client.XDel(ctx, "stream", "2-0").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ res, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
+ Group: "group",
+ Consumer: "consumer",
+ Streams: []string{"stream", "0"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: nil},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ },
+ },
+ }))
+ })
+
+ It("should XGroupCreateMkStream", func() {
+ err := client.XGroupCreateMkStream(ctx, "stream2", "group", "0").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.XGroupCreateMkStream(ctx, "stream2", "group", "0").Err()
+ Expect(err).To(Equal(proto.RedisError("BUSYGROUP Consumer Group name already exists")))
+
+ n, err := client.XGroupDestroy(ctx, "stream2", "group").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Del(ctx, "stream2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ })
+
+ It("should XPending", func() {
+ info, err := client.XPending(ctx, "stream", "group").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(Equal(&redis.XPending{
+ Count: 3,
+ Lower: "1-0",
+ Higher: "3-0",
+ Consumers: map[string]int64{"consumer": 3},
+ }))
+ args := &redis.XPendingExtArgs{
+ Stream: "stream",
+ Group: "group",
+ Start: "-",
+ End: "+",
+ Count: 10,
+ Consumer: "consumer",
+ }
+ infoExt, err := client.XPendingExt(ctx, args).Result()
+ Expect(err).NotTo(HaveOccurred())
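+				// Idle depends on elapsed wall-clock time, so zero it before comparing.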
+ for i := range infoExt {
+ infoExt[i].Idle = 0
+ }
+ Expect(infoExt).To(Equal([]redis.XPendingExt{
+ {ID: "1-0", Consumer: "consumer", Idle: 0, RetryCount: 1},
+ {ID: "2-0", Consumer: "consumer", Idle: 0, RetryCount: 1},
+ {ID: "3-0", Consumer: "consumer", Idle: 0, RetryCount: 1},
+ }))
+
+ args.Idle = 72 * time.Hour
+ infoExt, err = client.XPendingExt(ctx, args).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(infoExt).To(HaveLen(0))
+ })
+
+ It("should XGroup Create Delete Consumer", func() {
+ n, err := client.XGroupCreateConsumer(ctx, "stream", "group", "c1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
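+				// Deleting a consumer returns the number of pending entries it still owned.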
+ n, err = client.XGroupDelConsumer(ctx, "stream", "group", "consumer").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+ })
+
+ It("should XAutoClaim", func() {
+ xca := &redis.XAutoClaimArgs{
+ Stream: "stream",
+ Group: "group",
+ Consumer: "consumer",
+ Start: "-",
+ Count: 2,
+ }
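+				// The second return value is the cursor to resume from; "0-0" means the
+				// whole pending entries list has been scanned.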
+ msgs, start, err := client.XAutoClaim(ctx, xca).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(start).To(Equal("3-0"))
+ Expect(msgs).To(Equal([]redis.XMessage{{
+ ID: "1-0",
+ Values: map[string]interface{}{"uno": "un"},
+ }, {
+ ID: "2-0",
+ Values: map[string]interface{}{"dos": "deux"},
+ }}))
+
+ xca.Start = start
+ msgs, start, err = client.XAutoClaim(ctx, xca).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(start).To(Equal("0-0"))
+ Expect(msgs).To(Equal([]redis.XMessage{{
+ ID: "3-0",
+ Values: map[string]interface{}{"tres": "troix"},
+ }}))
+
+ ids, start, err := client.XAutoClaimJustID(ctx, xca).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(start).To(Equal("0-0"))
+ Expect(ids).To(Equal([]string{"3-0"}))
+ })
+
+ It("should XClaim", func() {
+ msgs, err := client.XClaim(ctx, &redis.XClaimArgs{
+ Stream: "stream",
+ Group: "group",
+ Consumer: "consumer",
+ Messages: []string{"1-0", "2-0", "3-0"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msgs).To(Equal([]redis.XMessage{{
+ ID: "1-0",
+ Values: map[string]interface{}{"uno": "un"},
+ }, {
+ ID: "2-0",
+ Values: map[string]interface{}{"dos": "deux"},
+ }, {
+ ID: "3-0",
+ Values: map[string]interface{}{"tres": "troix"},
+ }}))
+
+ ids, err := client.XClaimJustID(ctx, &redis.XClaimArgs{
+ Stream: "stream",
+ Group: "group",
+ Consumer: "consumer",
+ Messages: []string{"1-0", "2-0", "3-0"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ids).To(Equal([]string{"1-0", "2-0", "3-0"}))
+ })
+
+ It("should XAck", func() {
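+				// "4-0" is not in the group's pending entries list, so only two entries
+				// are acknowledged.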
+ n, err := client.XAck(ctx, "stream", "group", "1-0", "2-0", "4-0").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+ })
+
+ Describe("xinfo", func() {
+ BeforeEach(func() {
+ err := client.XGroupCreate(ctx, "stream", "group1", "0").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ res, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
+ Group: "group1",
+ Consumer: "consumer1",
+ Streams: []string{"stream", ">"},
+ Count: 2,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ },
+ },
+ }))
+
+ res, err = client.XReadGroup(ctx, &redis.XReadGroupArgs{
+ Group: "group1",
+ Consumer: "consumer2",
+ Streams: []string{"stream", ">"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ },
+ },
+ }))
+
+ err = client.XGroupCreate(ctx, "stream", "group2", "1-0").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ res, err = client.XReadGroup(ctx, &redis.XReadGroupArgs{
+ Group: "group2",
+ Consumer: "consumer1",
+ Streams: []string{"stream", ">"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XStream{
+ {
+ Stream: "stream",
+ Messages: []redis.XMessage{
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ },
+ },
+ }))
+ })
+
+ AfterEach(func() {
+ n, err := client.XGroupDestroy(ctx, "stream", "group1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ n, err = client.XGroupDestroy(ctx, "stream", "group2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ })
+
+ It("should XINFO STREAM", func() {
+ res, err := client.XInfoStream(ctx, "stream").Result()
+ Expect(err).NotTo(HaveOccurred())
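+				// The radix tree statistics depend on the server's internal layout, so
+				// zero them before comparing.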
+ res.RadixTreeKeys = 0
+ res.RadixTreeNodes = 0
+
+ Expect(res).To(Equal(&redis.XInfoStream{
+ Length: 3,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ Groups: 2,
+ LastGeneratedID: "3-0",
+ FirstEntry: redis.XMessage{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ LastEntry: redis.XMessage{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ }))
+
+				// Delete all entries and check XINFO STREAM on the now-empty stream.
+ n, err := client.XDel(ctx, "stream", "1-0", "2-0", "3-0").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(3)))
+
+ res, err = client.XInfoStream(ctx, "stream").Result()
+ Expect(err).NotTo(HaveOccurred())
+ res.RadixTreeKeys = 0
+ res.RadixTreeNodes = 0
+
+ Expect(res).To(Equal(&redis.XInfoStream{
+ Length: 0,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ Groups: 2,
+ LastGeneratedID: "3-0",
+ FirstEntry: redis.XMessage{},
+ LastEntry: redis.XMessage{},
+ }))
+ })
+
+ It("should XINFO STREAM FULL", func() {
+ res, err := client.XInfoStreamFull(ctx, "stream", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ res.RadixTreeKeys = 0
+ res.RadixTreeNodes = 0
+
+ // Verify DeliveryTime
+ now := time.Now()
+ maxElapsed := 10 * time.Minute
+ for k, g := range res.Groups {
+ for k2, p := range g.Pending {
+ Expect(now.Sub(p.DeliveryTime)).To(BeNumerically("<=", maxElapsed))
+ res.Groups[k].Pending[k2].DeliveryTime = time.Time{}
+ }
+ for k3, c := range g.Consumers {
+ Expect(now.Sub(c.SeenTime)).To(BeNumerically("<=", maxElapsed))
+ res.Groups[k].Consumers[k3].SeenTime = time.Time{}
+
+ for k4, p := range c.Pending {
+ Expect(now.Sub(p.DeliveryTime)).To(BeNumerically("<=", maxElapsed))
+ res.Groups[k].Consumers[k3].Pending[k4].DeliveryTime = time.Time{}
+ }
+ }
+ }
+
+ Expect(res).To(Equal(&redis.XInfoStreamFull{
+ Length: 3,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ LastGeneratedID: "3-0",
+ Entries: []redis.XMessage{
+ {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
+ {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
+ },
+ Groups: []redis.XInfoStreamGroup{
+ {
+ Name: "group1",
+ LastDeliveredID: "3-0",
+ PelCount: 3,
+ Pending: []redis.XInfoStreamGroupPending{
+ {
+ ID: "1-0",
+ Consumer: "consumer1",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ {
+ ID: "2-0",
+ Consumer: "consumer1",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ },
+ Consumers: []redis.XInfoStreamConsumer{
+ {
+ Name: "consumer1",
+ SeenTime: time.Time{},
+ PelCount: 2,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {
+ ID: "1-0",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ {
+ ID: "2-0",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ },
+ },
+ {
+ Name: "consumer2",
+ SeenTime: time.Time{},
+ PelCount: 1,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {
+ ID: "3-0",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: "group2",
+ LastDeliveredID: "3-0",
+ PelCount: 2,
+ Pending: []redis.XInfoStreamGroupPending{
+ {
+ ID: "2-0",
+ Consumer: "consumer1",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ {
+ ID: "3-0",
+ Consumer: "consumer1",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ },
+ Consumers: []redis.XInfoStreamConsumer{
+ {
+ Name: "consumer1",
+ SeenTime: time.Time{},
+ PelCount: 2,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {
+ ID: "2-0",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ {
+ ID: "3-0",
+ DeliveryTime: time.Time{},
+ DeliveryCount: 1,
+ },
+ },
+ },
+ },
+ },
+ },
+ }))
+ })
+
+ It("should XINFO GROUPS", func() {
+ res, err := client.XInfoGroups(ctx, "stream").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]redis.XInfoGroup{
+ {Name: "group1", Consumers: 2, Pending: 3, LastDeliveredID: "3-0"},
+ {Name: "group2", Consumers: 1, Pending: 2, LastDeliveredID: "3-0"},
+ }))
+ })
+
+ It("should XINFO CONSUMERS", func() {
+ res, err := client.XInfoConsumers(ctx, "stream", "group1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ for i := range res {
+ res[i].Idle = 0
+ }
+ Expect(res).To(Equal([]redis.XInfoConsumer{
+ {Name: "consumer1", Pending: 2, Idle: 0},
+ {Name: "consumer2", Pending: 1, Idle: 0},
+ }))
+ })
+ })
+ })
+
+ Describe("Geo add and radius search", func() {
+ BeforeEach(func() {
+ n, err := client.GeoAdd(
+ ctx,
+ "Sicily",
+ &redis.GeoLocation{Longitude: 13.361389, Latitude: 38.115556, Name: "Palermo"},
+ &redis.GeoLocation{Longitude: 15.087269, Latitude: 37.502669, Name: "Catania"},
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should not add same geo location", func() {
+ geoAdd := client.GeoAdd(
+ ctx,
+ "Sicily",
+ &redis.GeoLocation{Longitude: 13.361389, Latitude: 38.115556, Name: "Palermo"},
+ )
+ Expect(geoAdd.Err()).NotTo(HaveOccurred())
+ Expect(geoAdd.Val()).To(Equal(int64(0)))
+ })
+
+ It("should search geo radius", func() {
+ res, err := client.GeoRadius(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[0].Name).To(Equal("Palermo"))
+ Expect(res[1].Name).To(Equal("Catania"))
+ })
+
+ It("should geo radius and store the result", func() {
+ n, err := client.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Store: "result",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+
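+			// With Store, the matching members are written to a sorted set whose scores
+			// are the raw geohash values.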
+ res, err := client.ZRangeWithScores(ctx, "result", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainElement(redis.Z{
+ Score: 3.479099956230698e+15,
+ Member: "Palermo",
+ }))
+ Expect(res).To(ContainElement(redis.Z{
+ Score: 3.479447370796909e+15,
+ Member: "Catania",
+ }))
+ })
+
+ It("should geo radius and store dist", func() {
+ n, err := client.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ StoreDist: "result",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+
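+			// With StoreDist, each score is the member's distance from the query point;
+			// the values here are kilometers, the unit used when none is given.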
+ res, err := client.ZRangeWithScores(ctx, "result", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainElement(redis.Z{
+ Score: 190.44242984775784,
+ Member: "Palermo",
+ }))
+ Expect(res).To(ContainElement(redis.Z{
+ Score: 56.4412578701582,
+ Member: "Catania",
+ }))
+ })
+
+ It("should search geo radius with options", func() {
+ res, err := client.GeoRadius(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ WithDist: true,
+ Count: 2,
+ Sort: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[1].Name).To(Equal("Palermo"))
+ Expect(res[1].Dist).To(Equal(190.4424))
+ Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+ Expect(res[1].Longitude).To(Equal(13.361389338970184))
+ Expect(res[1].Latitude).To(Equal(38.115556395496299))
+ Expect(res[0].Name).To(Equal("Catania"))
+ Expect(res[0].Dist).To(Equal(56.4413))
+ Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+ Expect(res[0].Longitude).To(Equal(15.087267458438873))
+ Expect(res[0].Latitude).To(Equal(37.50266842333162))
+ })
+
+ It("should search geo radius with WithDist=false", func() {
+ res, err := client.GeoRadius(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ Count: 2,
+ Sort: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[1].Name).To(Equal("Palermo"))
+ Expect(res[1].Dist).To(Equal(float64(0)))
+ Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+ Expect(res[1].Longitude).To(Equal(13.361389338970184))
+ Expect(res[1].Latitude).To(Equal(38.115556395496299))
+ Expect(res[0].Name).To(Equal("Catania"))
+ Expect(res[0].Dist).To(Equal(float64(0)))
+ Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+ Expect(res[0].Longitude).To(Equal(15.087267458438873))
+ Expect(res[0].Latitude).To(Equal(37.50266842333162))
+ })
+
+ It("should search geo radius by member with options", func() {
+ res, err := client.GeoRadiusByMember(ctx, "Sicily", "Catania", &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ WithDist: true,
+ Count: 2,
+ Sort: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[0].Name).To(Equal("Catania"))
+ Expect(res[0].Dist).To(Equal(0.0))
+ Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+ Expect(res[0].Longitude).To(Equal(15.087267458438873))
+ Expect(res[0].Latitude).To(Equal(37.50266842333162))
+ Expect(res[1].Name).To(Equal("Palermo"))
+ Expect(res[1].Dist).To(Equal(166.2742))
+ Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+ Expect(res[1].Longitude).To(Equal(13.361389338970184))
+ Expect(res[1].Latitude).To(Equal(38.115556395496299))
+ })
+
+ It("should search geo radius with no results", func() {
+ res, err := client.GeoRadius(ctx, "Sicily", 99, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ WithDist: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(0))
+ })
+
+ It("should get geo distance with unit options", func() {
+ // From the Redis CLI; note the difference in rounding between m and
+ // km on Redis itself.
+ //
+ // GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+ // GEODIST Sicily Palermo Catania m
+ // "166274.15156960033"
+ // GEODIST Sicily Palermo Catania km
+ // "166.27415156960032"
+ dist, err := client.GeoDist(ctx, "Sicily", "Palermo", "Catania", "km").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dist).To(BeNumerically("~", 166.27, 0.01))
+
+ dist, err = client.GeoDist(ctx, "Sicily", "Palermo", "Catania", "m").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dist).To(BeNumerically("~", 166274.15, 0.01))
+ })
+
+ It("should get geo hash in string representation", func() {
+ hashes, err := client.GeoHash(ctx, "Sicily", "Palermo", "Catania").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(hashes).To(ConsistOf([]string{"sqc8b49rny0", "sqdtr74hyu0"}))
+ })
+
+ It("should return geo position", func() {
+ pos, err := client.GeoPos(ctx, "Sicily", "Palermo", "Catania", "NonExisting").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(ConsistOf([]*redis.GeoPos{
+ {
+ Longitude: 13.361389338970184,
+ Latitude: 38.1155563954963,
+ },
+ {
+ Longitude: 15.087267458438873,
+ Latitude: 37.50266842333162,
+ },
+ nil,
+ }))
+ })
+
+ It("should geo search", func() {
+ q := &redis.GeoSearchQuery{
+ Member: "Catania",
+ BoxWidth: 400,
+ BoxHeight: 100,
+ BoxUnit: "km",
+ Sort: "asc",
+ }
+ val, err := client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.BoxHeight = 400
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania", "Palermo"}))
+
+ q.Count = 1
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.CountAny = true
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Palermo"}))
+
+ q = &redis.GeoSearchQuery{
+ Member: "Catania",
+ Radius: 100,
+ RadiusUnit: "km",
+ Sort: "asc",
+ }
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.Radius = 400
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania", "Palermo"}))
+
+ q.Count = 1
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.CountAny = true
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Palermo"}))
+
+ q = &redis.GeoSearchQuery{
+ Longitude: 15,
+ Latitude: 37,
+ BoxWidth: 200,
+ BoxHeight: 200,
+ BoxUnit: "km",
+ Sort: "asc",
+ }
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.BoxWidth, q.BoxHeight = 400, 400
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania", "Palermo"}))
+
+ q.Count = 1
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.CountAny = true
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Palermo"}))
+
+ q = &redis.GeoSearchQuery{
+ Longitude: 15,
+ Latitude: 37,
+ Radius: 100,
+ RadiusUnit: "km",
+ Sort: "asc",
+ }
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.Radius = 200
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania", "Palermo"}))
+
+ q.Count = 1
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Catania"}))
+
+ q.CountAny = true
+ val, err = client.GeoSearch(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]string{"Palermo"}))
+ })
+
+ It("should geo search with options", func() {
+ q := &redis.GeoSearchLocationQuery{
+ GeoSearchQuery: redis.GeoSearchQuery{
+ Longitude: 15,
+ Latitude: 37,
+ Radius: 200,
+ RadiusUnit: "km",
+ Sort: "asc",
+ },
+ WithHash: true,
+ WithDist: true,
+ WithCoord: true,
+ }
+ val, err := client.GeoSearchLocation(ctx, "Sicily", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.GeoLocation{
+ {
+ Name: "Catania",
+ Longitude: 15.08726745843887329,
+ Latitude: 37.50266842333162032,
+ Dist: 56.4413,
+ GeoHash: 3479447370796909,
+ },
+ {
+ Name: "Palermo",
+ Longitude: 13.36138933897018433,
+ Latitude: 38.11555639549629859,
+ Dist: 190.4424,
+ GeoHash: 3479099956230698,
+ },
+ }))
+ })
+
+ It("should geo search store", func() {
+ q := &redis.GeoSearchStoreQuery{
+ GeoSearchQuery: redis.GeoSearchQuery{
+ Longitude: 15,
+ Latitude: 37,
+ Radius: 200,
+ RadiusUnit: "km",
+ Sort: "asc",
+ },
+ StoreDist: false,
+ }
+
+ val, err := client.GeoSearchStore(ctx, "Sicily", "key1", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(2)))
+
+ q.StoreDist = true
+ val, err = client.GeoSearchStore(ctx, "Sicily", "key2", q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(2)))
+
+ loc, err := client.GeoSearchLocation(ctx, "key1", &redis.GeoSearchLocationQuery{
+ GeoSearchQuery: q.GeoSearchQuery,
+ WithCoord: true,
+ WithDist: true,
+ WithHash: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(loc).To(Equal([]redis.GeoLocation{
+ {
+ Name: "Catania",
+ Longitude: 15.08726745843887329,
+ Latitude: 37.50266842333162032,
+ Dist: 56.4413,
+ GeoHash: 3479447370796909,
+ },
+ {
+ Name: "Palermo",
+ Longitude: 13.36138933897018433,
+ Latitude: 38.11555639549629859,
+ Dist: 190.4424,
+ GeoHash: 3479099956230698,
+ },
+ }))
+
+ v, err := client.ZRangeWithScores(ctx, "key2", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]redis.Z{
+ {
+ Score: 56.441257870158204,
+ Member: "Catania",
+ },
+ {
+ Score: 190.44242984775784,
+ Member: "Palermo",
+ },
+ }))
+ })
+ })
+
+ Describe("marshaling/unmarshaling", func() {
+ type convTest struct {
+ value interface{}
+ wanted string
+ dest interface{}
+ }
+
+ convTests := []convTest{
+ {nil, "", nil},
+ {"hello", "hello", new(string)},
+ {[]byte("hello"), "hello", new([]byte)},
+ {int(1), "1", new(int)},
+ {int8(1), "1", new(int8)},
+ {int16(1), "1", new(int16)},
+ {int32(1), "1", new(int32)},
+ {int64(1), "1", new(int64)},
+ {uint(1), "1", new(uint)},
+ {uint8(1), "1", new(uint8)},
+ {uint16(1), "1", new(uint16)},
+ {uint32(1), "1", new(uint32)},
+ {uint64(1), "1", new(uint64)},
+ {float32(1.0), "1", new(float32)},
+ {float64(1.0), "1", new(float64)},
+ {true, "1", new(bool)},
+ {false, "0", new(bool)},
+ }
+
+ It("should convert to string", func() {
+ for _, test := range convTests {
+ err := client.Set(ctx, "key", test.value, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ s, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(Equal(test.wanted))
+
+ if test.dest == nil {
+ continue
+ }
+
+ err = client.Get(ctx, "key").Scan(test.dest)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(deref(test.dest)).To(Equal(test.value))
+ }
+ })
+ })
+
+ Describe("json marshaling/unmarshaling", func() {
+ BeforeEach(func() {
+ value := &numberStruct{Number: 42}
+ err := client.Set(ctx, "key", value, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should marshal custom values using json", func() {
+ s, err := client.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(Equal(`{"Number":42}`))
+ })
+
+ It("should scan custom values using json", func() {
+ value := &numberStruct{}
+ err := client.Get(ctx, "key").Scan(value)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(value.Number).To(Equal(42))
+ })
+ })
+
+ Describe("Eval", func() {
+ It("returns keys and values", func() {
+ vals, err := client.Eval(
+ ctx,
+ "return {KEYS[1],ARGV[1]}",
+ []string{"key"},
+ "hello",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{"key", "hello"}))
+ })
+
+ It("returns all values after an error", func() {
+ vals, err := client.Eval(
+ ctx,
+ `return {12, {err="error"}, "abc"}`,
+ nil,
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{int64(12), proto.RedisError("error"), "abc"}))
+ })
+ })
+
+ Describe("SlowLogGet", func() {
+ It("returns slow query result", func() {
+ const key = "slowlog-log-slower-than"
+
+ old := client.ConfigGet(ctx, key).Val()
+ client.ConfigSet(ctx, key, "0")
+ defer client.ConfigSet(ctx, key, old[1].(string))
+
+ err := client.Do(ctx, "slowlog", "reset").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ client.Set(ctx, "test", "true", 0)
+
+ result, err := client.SlowLogGet(ctx, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).NotTo(BeZero())
+ })
+ })
+})
+
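+// numberStruct marshals to and from JSON via MarshalBinary/UnmarshalBinary,
+// so it can be stored with Set and read back with Scan in the tests above.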
+type numberStruct struct {
+ Number int
+}
+
+func (s *numberStruct) MarshalBinary() ([]byte, error) {
+ return json.Marshal(s)
+}
+
+func (s *numberStruct) UnmarshalBinary(b []byte) error {
+ return json.Unmarshal(b, s)
+}
+
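+// deref follows pointers until a non-pointer value is reached and returns the
+// underlying value, so scanned destinations can be compared directly.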
+func deref(viface interface{}) interface{} {
+ v := reflect.ValueOf(viface)
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go
new file mode 100644
index 0000000..5526253
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go
new file mode 100644
index 0000000..521594b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go
@@ -0,0 +1,144 @@
+package redis
+
+import (
+ "context"
+ "io"
+ "net"
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+// ErrClosed is returned when any operation is performed on a closed client.
+var ErrClosed = pool.ErrClosed
+
+type Error interface {
+ error
+
+ // RedisError is a no-op function but
+ // serves to distinguish types that are Redis
+ // errors from ordinary errors: a type is a
+ // Redis error if it has a RedisError method.
+ RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
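+// shouldRetry reports whether a failed command should be retried for the given
+// error. EOF and non-timeout network errors are retried; timeouts are retried
+// only when retryTimeout is true; transient Redis replies such as LOADING,
+// READONLY, CLUSTERDOWN and TRYAGAIN are retried as well.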
+func shouldRetry(err error, retryTimeout bool) bool {
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return true
+ case nil, context.Canceled, context.DeadlineExceeded:
+ return false
+ }
+
+ if v, ok := err.(timeoutError); ok {
+ if v.Timeout() {
+ return retryTimeout
+ }
+ return true
+ }
+
+ s := err.Error()
+ if s == "ERR max number of clients reached" {
+ return true
+ }
+ if strings.HasPrefix(s, "LOADING ") {
+ return true
+ }
+ if strings.HasPrefix(s, "READONLY ") {
+ return true
+ }
+ if strings.HasPrefix(s, "CLUSTERDOWN ") {
+ return true
+ }
+ if strings.HasPrefix(s, "TRYAGAIN ") {
+ return true
+ }
+
+ return false
+}
+
+func isRedisError(err error) bool {
+ _, ok := err.(proto.RedisError)
+ return ok
+}
+
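+// isBadConn reports whether the connection that produced err should be
+// discarded instead of being returned to the pool.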
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+ switch err {
+ case nil:
+ return false
+ case context.Canceled, context.DeadlineExceeded:
+ return true
+ }
+
+ if isRedisError(err) {
+ switch {
+ case isReadOnlyError(err):
+ // Close connections in read-only state in case a domain addr is used
+ // and the domain resolves to a different Redis server. See #790.
+ return true
+ case isMovedSameConnAddr(err, addr):
+ // Close connections when we are asked to move to the same addr
+ // of the connection. Force a DNS resolution when all connections
+ // of the pool are recycled.
+ return true
+ default:
+ return false
+ }
+ }
+
+ if allowTimeout {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return !netErr.Temporary()
+ }
+ }
+
+ return true
+}
+
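+// isMovedError parses MOVED and ASK redirection errors and returns the address
+// of the node the command should be redirected to.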
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+ if !isRedisError(err) {
+ return
+ }
+
+ s := err.Error()
+ switch {
+ case strings.HasPrefix(s, "MOVED "):
+ moved = true
+ case strings.HasPrefix(s, "ASK "):
+ ask = true
+ default:
+ return
+ }
+
+ ind := strings.LastIndex(s, " ")
+ if ind == -1 {
+ return false, false, ""
+ }
+ addr = s[ind+1:]
+ return
+}
+
+func isLoadingError(err error) bool {
+ return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+ return strings.HasPrefix(err.Error(), "READONLY ")
+}
+
+func isMovedSameConnAddr(err error, addr string) bool {
+ redisError := err.Error()
+ if !strings.HasPrefix(redisError, "MOVED ") {
+ return false
+ }
+ return strings.HasSuffix(redisError, " "+addr)
+}
+
+//------------------------------------------------------------------------------
+
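+// timeoutError is implemented by errors that can report whether they are
+// timeouts, such as net.Error.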
+type timeoutError interface {
+ Timeout() bool
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go
new file mode 100644
index 0000000..d66edce
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go
@@ -0,0 +1,80 @@
+package redis_test
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-redis/redis/v8"
+)
+
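+// redisHook is a minimal redis.Hook implementation that prints every command
+// before and after it is processed, for both single commands and pipelines.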
+type redisHook struct{}
+
+var _ redis.Hook = redisHook{}
+
+func (redisHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ fmt.Printf("starting processing: <%s>\n", cmd)
+ return ctx, nil
+}
+
+func (redisHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
+ fmt.Printf("finished processing: <%s>\n", cmd)
+ return nil
+}
+
+func (redisHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ fmt.Printf("pipeline starting processing: %v\n", cmds)
+ return ctx, nil
+}
+
+func (redisHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
+ fmt.Printf("pipeline finished processing: %v\n", cmds)
+ return nil
+}
+
+func Example_instrumentation() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ })
+ rdb.AddHook(redisHook{})
+
+ rdb.Ping(ctx)
+ // Output: starting processing: <ping: >
+ // finished processing: <ping: PONG>
+}
+
+func ExamplePipeline_instrumentation() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ })
+ rdb.AddHook(redisHook{})
+
+ rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ pipe.Ping(ctx)
+ return nil
+ })
+ // Output: pipeline starting processing: [ping: ping: ]
+ // pipeline finished processing: [ping: PONG ping: PONG]
+}
+
+func ExampleClient_Watch_instrumentation() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ })
+ rdb.AddHook(redisHook{})
+
+ rdb.Watch(ctx, func(tx *redis.Tx) error {
+ tx.Ping(ctx)
+ tx.Ping(ctx)
+ return nil
+ }, "foo")
+ // Output:
+ // starting processing: <watch foo: >
+ // finished processing: <watch foo: OK>
+ // starting processing: <ping: >
+ // finished processing: <ping: PONG>
+ // starting processing: <ping: >
+ // finished processing: <ping: PONG>
+ // starting processing: <unwatch: >
+ // finished processing: <unwatch: OK>
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go
new file mode 100644
index 0000000..f015809
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go
@@ -0,0 +1,634 @@
+package redis_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var (
+ ctx = context.Background()
+ rdb *redis.Client
+)
+
+func init() {
+ rdb = redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ })
+}
+
+func ExampleNewClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379", // use default Addr
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ pong, err := rdb.Ping(ctx).Result()
+ fmt.Println(pong, err)
+ // Output: PONG <nil>
+}
+
+func ExampleParseURL() {
+ opt, err := redis.ParseURL("redis://:qwerty@localhost:6379/1?dial_timeout=5s")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("addr is", opt.Addr)
+ fmt.Println("db is", opt.DB)
+ fmt.Println("password is", opt.Password)
+ fmt.Println("dial timeout is", opt.DialTimeout)
+
+ // Create a client as usual.
+ _ = redis.NewClient(opt)
+
+ // Output: addr is localhost:6379
+ // db is 1
+ // password is qwerty
+ // dial timeout is 5s
+}
+
+func ExampleNewFailoverClient() {
+ // See http://redis.io/topics/sentinel for instructions on how to
+ // set up Redis Sentinel.
+ rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "master",
+ SentinelAddrs: []string{":26379"},
+ })
+ rdb.Ping(ctx)
+}
+
+func ExampleNewClusterClient() {
+ // See http://redis.io/topics/cluster-tutorial for instructions
+ // on how to set up Redis Cluster.
+ rdb := redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
+ })
+ rdb.Ping(ctx)
+}
+
+// The following example creates a cluster from 2 master nodes and 2 slave nodes
+// without using cluster mode or Redis Sentinel.
+func ExampleNewClusterClient_manualSetup() {
+ // clusterSlots returns cluster slots information.
+ // It can use a service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ clusterSlots := func(ctx context.Context) ([]redis.ClusterSlot, error) {
+ slots := []redis.ClusterSlot{
+ // First node with 1 master and 1 slave.
+ {
+ Start: 0,
+ End: 8191,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":7000", // master
+ }, {
+ Addr: ":8000", // 1st slave
+ }},
+ },
+ // Second node with 1 master and 1 slave.
+ {
+ Start: 8192,
+ End: 16383,
+ Nodes: []redis.ClusterNode{{
+ Addr: ":7001", // master
+ }, {
+ Addr: ":8001", // 1st slave
+ }},
+ },
+ }
+ return slots, nil
+ }
+
+ rdb := redis.NewClusterClient(&redis.ClusterOptions{
+ ClusterSlots: clusterSlots,
+ RouteRandomly: true,
+ })
+ rdb.Ping(ctx)
+
+ // ReloadState reloads cluster state. It calls ClusterSlots func
+ // to get cluster slots information.
+ rdb.ReloadState(ctx)
+}
+
+func ExampleNewRing() {
+ rdb := redis.NewRing(&redis.RingOptions{
+ Addrs: map[string]string{
+ "shard1": ":7000",
+ "shard2": ":7001",
+ "shard3": ":7002",
+ },
+ })
+ rdb.Ping(ctx)
+}
+
+func ExampleClient() {
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "missing_key").Result()
+ if err == redis.Nil {
+ fmt.Println("missing_key does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("missing_key", val2)
+ }
+ // Output: key value
+ // missing_key does not exist
+}
+
+func ExampleConn() {
+ conn := rdb.Conn(context.Background())
+
+ err := conn.ClientSetName(ctx, "foobar").Err()
+ if err != nil {
+ panic(err)
+ }
+
+ // Open other connections.
+ for i := 0; i < 10; i++ {
+ go rdb.Ping(ctx)
+ }
+
+ s, err := conn.ClientGetName(ctx).Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(s)
+ // Output: foobar
+}
+
+func ExampleClient_Set() {
+ // Last argument is expiration. Zero means the key has no
+ // expiration time.
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ // key2 will expire in an hour.
+ err = rdb.Set(ctx, "key2", "value", time.Hour).Err()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleClient_SetEX() {
+ err := rdb.SetEX(ctx, "key", "value", time.Hour).Err()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleClient_Incr() {
+ result, err := rdb.Incr(ctx, "counter").Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(result)
+ // Output: 1
+}
+
+func ExampleClient_BLPop() {
+ if err := rdb.RPush(ctx, "queue", "message").Err(); err != nil {
+ panic(err)
+ }
+
+ // Use `rdb.BLPop(ctx, 0, "queue")` for an infinite waiting time.
+ result, err := rdb.BLPop(ctx, 1*time.Second, "queue").Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(result[0], result[1])
+ // Output: queue message
+}
+
+func ExampleClient_Scan() {
+ rdb.FlushDB(ctx)
+ for i := 0; i < 33; i++ {
+ err := rdb.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ var cursor uint64
+ var n int
+ for {
+ var keys []string
+ var err error
+ keys, cursor, err = rdb.Scan(ctx, cursor, "key*", 10).Result()
+ if err != nil {
+ panic(err)
+ }
+ n += len(keys)
+ if cursor == 0 {
+ break
+ }
+ }
+
+ fmt.Printf("found %d keys\n", n)
+ // Output: found 33 keys
+}
+
+func ExampleClient_ScanType() {
+ rdb.FlushDB(ctx)
+ for i := 0; i < 33; i++ {
+ err := rdb.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ var cursor uint64
+ var n int
+ for {
+ var keys []string
+ var err error
+ keys, cursor, err = rdb.ScanType(ctx, cursor, "key*", 10, "string").Result()
+ if err != nil {
+ panic(err)
+ }
+ n += len(keys)
+ if cursor == 0 {
+ break
+ }
+ }
+
+ fmt.Printf("found %d keys\n", n)
+ // Output: found 33 keys
+}
+
+// ExampleStringStringMapCmd_Scan shows how to scan the results of a map fetch
+// into a struct.
+func ExampleStringStringMapCmd_Scan() {
+ rdb.FlushDB(ctx)
+ err := rdb.HMSet(ctx, "map",
+ "name", "hello",
+ "count", 123,
+ "correct", true).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ // Get the map. The same approach works for HMGet().
+ res := rdb.HGetAll(ctx, "map")
+ if res.Err() != nil {
+ panic(err)
+ }
+
+ type data struct {
+ Name string `redis:"name"`
+ Count int `redis:"count"`
+ Correct bool `redis:"correct"`
+ }
+
+ // Scan the results into the struct.
+ var d data
+ if err := res.Scan(&d); err != nil {
+ panic(err)
+ }
+
+ fmt.Println(d)
+ // Output: {hello 123 true}
+}
+
+// ExampleSliceCmd_Scan shows how to scan the results of a multi key fetch
+// into a struct.
+func ExampleSliceCmd_Scan() {
+ rdb.FlushDB(ctx)
+ err := rdb.MSet(ctx,
+ "name", "hello",
+ "count", 123,
+ "correct", true).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ res := rdb.MGet(ctx, "name", "count", "empty", "correct")
+ if res.Err() != nil {
+ panic(err)
+ }
+
+ type data struct {
+ Name string `redis:"name"`
+ Count int `redis:"count"`
+ Correct bool `redis:"correct"`
+ }
+
+ // Scan the results into the struct.
+ var d data
+ if err := res.Scan(&d); err != nil {
+ panic(err)
+ }
+
+ fmt.Println(d)
+ // Output: {hello 123 true}
+}
+
+func ExampleClient_Pipelined() {
+ var incr *redis.IntCmd
+ _, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ incr = pipe.Incr(ctx, "pipelined_counter")
+ pipe.Expire(ctx, "pipelined_counter", time.Hour)
+ return nil
+ })
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_Pipeline() {
+ pipe := rdb.Pipeline()
+
+ incr := pipe.Incr(ctx, "pipeline_counter")
+ pipe.Expire(ctx, "pipeline_counter", time.Hour)
+
+ // Execute
+ //
+ // INCR pipeline_counter
+ // EXPIRE pipeline_counter 3600
+ //
+ // using one rdb-server roundtrip.
+ _, err := pipe.Exec(ctx)
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_TxPipelined() {
+ var incr *redis.IntCmd
+ _, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ incr = pipe.Incr(ctx, "tx_pipelined_counter")
+ pipe.Expire(ctx, "tx_pipelined_counter", time.Hour)
+ return nil
+ })
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_TxPipeline() {
+ pipe := rdb.TxPipeline()
+
+ incr := pipe.Incr(ctx, "tx_pipeline_counter")
+ pipe.Expire(ctx, "tx_pipeline_counter", time.Hour)
+
+ // Execute
+ //
+ // MULTI
+ // INCR tx_pipeline_counter
+ // EXPIRE tx_pipeline_counter 3600
+ // EXEC
+ //
+ // using one rdb-server roundtrip.
+ _, err := pipe.Exec(ctx)
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_Watch() {
+ const maxRetries = 1000
+
+ // Increment transactionally increments key using GET and SET commands.
+ increment := func(key string) error {
+ // Transactional function.
+ txf := func(tx *redis.Tx) error {
+ // Get current value or zero.
+ n, err := tx.Get(ctx, key).Int()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ // Actual operation (local in the optimistic lock).
+ n++
+
+ // Operation is committed only if the watched keys remain unchanged.
+ _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, key, n, 0)
+ return nil
+ })
+ return err
+ }
+
+ for i := 0; i < maxRetries; i++ {
+ err := rdb.Watch(ctx, txf, key)
+ if err == nil {
+ // Success.
+ return nil
+ }
+ if err == redis.TxFailedErr {
+ // Optimistic lock lost. Retry.
+ continue
+ }
+ // Return any other error.
+ return err
+ }
+
+ return errors.New("increment reached maximum number of retries")
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ if err := increment("counter3"); err != nil {
+ fmt.Println("increment error:", err)
+ }
+ }()
+ }
+ wg.Wait()
+
+ n, err := rdb.Get(ctx, "counter3").Int()
+ fmt.Println("ended with", n, err)
+ // Output: ended with 100 <nil>
+}
+
+func ExamplePubSub() {
+ pubsub := rdb.Subscribe(ctx, "mychannel1")
+
+ // Wait for confirmation that subscription is created before publishing anything.
+ _, err := pubsub.Receive(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ // Go channel which receives messages.
+ ch := pubsub.Channel()
+
+ // Publish a message.
+ err = rdb.Publish(ctx, "mychannel1", "hello").Err()
+ if err != nil {
+ panic(err)
+ }
+
+ time.AfterFunc(time.Second, func() {
+ // When pubsub is closed, the channel is closed too.
+ _ = pubsub.Close()
+ })
+
+ // Consume messages.
+ for msg := range ch {
+ fmt.Println(msg.Channel, msg.Payload)
+ }
+
+ // Output: mychannel1 hello
+}
+
+func ExamplePubSub_Receive() {
+ pubsub := rdb.Subscribe(ctx, "mychannel2")
+ defer pubsub.Close()
+
+ for i := 0; i < 2; i++ {
+ // ReceiveTimeout is a low level API. Use ReceiveMessage instead.
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ if err != nil {
+ break
+ }
+
+ switch msg := msgi.(type) {
+ case *redis.Subscription:
+ fmt.Println("subscribed to", msg.Channel)
+
+ _, err := rdb.Publish(ctx, "mychannel2", "hello").Result()
+ if err != nil {
+ panic(err)
+ }
+ case *redis.Message:
+ fmt.Println("received", msg.Payload, "from", msg.Channel)
+ default:
+ panic("unreached")
+ }
+ }
+
+ // sent message to 1 rdb
+ // received hello from mychannel2
+}
+
+func ExampleScript() {
+ IncrByXX := redis.NewScript(`
+ if redis.call("GET", KEYS[1]) ~= false then
+ return redis.call("INCRBY", KEYS[1], ARGV[1])
+ end
+ return false
+ `)
+
+ n, err := IncrByXX.Run(ctx, rdb, []string{"xx_counter"}, 2).Result()
+ fmt.Println(n, err)
+
+ err = rdb.Set(ctx, "xx_counter", "40", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ n, err = IncrByXX.Run(ctx, rdb, []string{"xx_counter"}, 2).Result()
+ fmt.Println(n, err)
+
+ // Output: <nil> redis: nil
+ // 42 <nil>
+}
+
+func Example_customCommand() {
+ Get := func(ctx context.Context, rdb *redis.Client, key string) *redis.StringCmd {
+ cmd := redis.NewStringCmd(ctx, "get", key)
+ rdb.Process(ctx, cmd)
+ return cmd
+ }
+
+ v, err := Get(ctx, rdb, "key_does_not_exist").Result()
+ fmt.Printf("%q %s", v, err)
+ // Output: "" redis: nil
+}
+
+func Example_customCommand2() {
+ v, err := rdb.Do(ctx, "get", "key_does_not_exist").Text()
+ fmt.Printf("%q %s", v, err)
+ // Output: "" redis: nil
+}
+
+func ExampleScanIterator() {
+ iter := rdb.Scan(ctx, 0, "", 0).Iterator()
+ for iter.Next(ctx) {
+ fmt.Println(iter.Val())
+ }
+ if err := iter.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func ExampleScanCmd_Iterator() {
+ iter := rdb.Scan(ctx, 0, "", 0).Iterator()
+ for iter.Next(ctx) {
+ fmt.Println(iter.Val())
+ }
+ if err := iter.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func ExampleNewUniversalClient_simple() {
+ rdb := redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: []string{":6379"},
+ })
+ defer rdb.Close()
+
+ rdb.Ping(ctx)
+}
+
+func ExampleNewUniversalClient_failover() {
+ rdb := redis.NewUniversalClient(&redis.UniversalOptions{
+ MasterName: "master",
+ Addrs: []string{":26379"},
+ })
+ defer rdb.Close()
+
+ rdb.Ping(ctx)
+}
+
+func ExampleNewUniversalClient_cluster() {
+ rdb := redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
+ })
+ defer rdb.Close()
+
+ rdb.Ping(ctx)
+}
+
+func ExampleClient_SlowLogGet() {
+ const key = "slowlog-log-slower-than"
+
+ old := rdb.ConfigGet(ctx, key).Val()
+ rdb.ConfigSet(ctx, key, "0")
+ defer rdb.ConfigSet(ctx, key, old[1].(string))
+
+ if err := rdb.Do(ctx, "slowlog", "reset").Err(); err != nil {
+ panic(err)
+ }
+
+ rdb.Set(ctx, "test", "true", 0)
+
+ result, err := rdb.SlowLogGet(ctx, -1).Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(len(result))
+ // Output: 2
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go
new file mode 100644
index 0000000..49c4b94
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go
@@ -0,0 +1,95 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+func (c *baseClient) Pool() pool.Pooler {
+ return c.connPool
+}
+
+func (c *PubSub) SetNetConn(netConn net.Conn) {
+ c.cn = pool.NewConn(netConn)
+}
+
+func (c *ClusterClient) LoadState(ctx context.Context) (*clusterState, error) {
+ // return c.state.Reload(ctx)
+ return c.loadState(ctx)
+}
+
+func (c *ClusterClient) SlotAddrs(ctx context.Context, slot int) []string {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ var addrs []string
+ for _, n := range state.slotNodes(slot) {
+ addrs = append(addrs, n.Client.getAddr())
+ }
+ return addrs
+}
+
+func (c *ClusterClient) Nodes(ctx context.Context, key string) ([]*clusterNode, error) {
+ state, err := c.state.Reload(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ slot := hashtag.Slot(key)
+ nodes := state.slotNodes(slot)
+ if len(nodes) != 2 {
+ return nil, fmt.Errorf("slot=%d does not have enough nodes: %v", slot, nodes)
+ }
+ return nodes, nil
+}
+
+func (c *ClusterClient) SwapNodes(ctx context.Context, key string) error {
+ nodes, err := c.Nodes(ctx, key)
+ if err != nil {
+ return err
+ }
+ nodes[0], nodes[1] = nodes[1], nodes[0]
+ return nil
+}
+
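+// IsConsistent reports whether the cluster state has at least three masters and
+// three slaves, with every node reporting the expected role in INFO replication.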
+func (state *clusterState) IsConsistent(ctx context.Context) bool {
+ if len(state.Masters) < 3 {
+ return false
+ }
+ for _, master := range state.Masters {
+ s := master.Client.Info(ctx, "replication").Val()
+ if !strings.Contains(s, "role:master") {
+ return false
+ }
+ }
+
+ if len(state.Slaves) < 3 {
+ return false
+ }
+ for _, slave := range state.Slaves {
+ s := slave.Client.Info(ctx, "replication").Val()
+ if !strings.Contains(s, "role:slave") {
+ return false
+ }
+ }
+
+ return true
+}
+
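+// GetSlavesAddrByName returns the addresses of the replicas of the named master
+// as reported by Sentinel; on error it logs the failure and returns an empty slice.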
+func GetSlavesAddrByName(ctx context.Context, c *SentinelClient, name string) []string {
+ addrs, err := c.Slaves(ctx, name).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ name, err)
+ return []string{}
+ }
+ return parseSlaveAddrs(addrs, false)
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go
new file mode 100644
index 0000000..3225d24
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go
@@ -0,0 +1,49 @@
+//go:build gofuzz
+// +build gofuzz
+
+package fuzz
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var (
+ ctx = context.Background()
+ rdb *redis.Client
+)
+
+func init() {
+ rdb = redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ PoolSize: 10,
+ PoolTimeout: 10 * time.Second,
+ })
+}
+
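+// Fuzz exercises SET, GET, INCR and SCAN with fuzzer-provided data; it is only
+// compiled when the gofuzz build tag is set.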
+func Fuzz(data []byte) int {
+ arrayLen := len(data)
+ if arrayLen < 4 {
+ return -1
+ }
+ maxIter := int(uint(data[0]))
+ for i := 0; i < maxIter && i < arrayLen; i++ {
+ n := i % arrayLen
+ if n == 0 {
+ _ = rdb.Set(ctx, string(data[i:]), string(data[i:]), 0).Err()
+ } else if n == 1 {
+ _, _ = rdb.Get(ctx, string(data[i:])).Result()
+ } else if n == 2 {
+ _, _ = rdb.Incr(ctx, string(data[i:])).Result()
+ } else if n == 3 {
+ var cursor uint64
+ _, _, _ = rdb.Scan(ctx, cursor, string(data[i:]), 10).Result()
+ }
+ }
+ return 1
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod
new file mode 100644
index 0000000..d2610c2
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod
@@ -0,0 +1,20 @@
+module github.com/go-redis/redis/v8
+
+go 1.17
+
+require (
+ github.com/cespare/xxhash/v2 v2.1.2
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+ github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/gomega v1.18.1
+)
+
+require (
+ github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/nxadm/tail v1.4.8 // indirect
+ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+ golang.org/x/text v0.3.6 // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum
new file mode 100644
index 0000000..e88f31a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum
@@ -0,0 +1,108 @@
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go
new file mode 100644
index 0000000..b97fa0d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+)
+
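+// AppendArg appends a human-readable string representation of v to b, handling
+// strings, []byte, integers, floats, booleans and time.Time specially.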
+func AppendArg(b []byte, v interface{}) []byte {
+ switch v := v.(type) {
+ case nil:
+ return append(b, "<nil>"...)
+ case string:
+ return appendUTF8String(b, Bytes(v))
+ case []byte:
+ return appendUTF8String(b, v)
+ case int:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int8:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int16:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int32:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int64:
+ return strconv.AppendInt(b, v, 10)
+ case uint:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint8:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint16:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint32:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint64:
+ return strconv.AppendUint(b, v, 10)
+ case float32:
+ return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
+ case float64:
+ return strconv.AppendFloat(b, v, 'f', -1, 64)
+ case bool:
+ if v {
+ return append(b, "true"...)
+ }
+ return append(b, "false"...)
+ case time.Time:
+ return v.AppendFormat(b, time.RFC3339Nano)
+ default:
+ return append(b, fmt.Sprint(v)...)
+ }
+}
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+ dst = append(dst, src...)
+ return dst
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go
new file mode 100644
index 0000000..b3a4f21
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go
@@ -0,0 +1,78 @@
+package hashtag
+
+import (
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
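+// slotNumber is the total number of hash slots in a Redis Cluster.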
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
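+// Key returns the hash tag of key (the substring between the first '{' and the
+// following '}'), or the whole key when no non-empty tag is present.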
+func Key(key string) string {
+ if s := strings.IndexByte(key, '{'); s > -1 {
+ if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+ return key[s+1 : s+e+1]
+ }
+ }
+ return key
+}
+
+func RandomSlot() int {
+ return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+ if key == "" {
+ return RandomSlot()
+ }
+ key = Key(key)
+ return int(crc16sum(key)) % slotNumber
+}
+
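+// crc16sum computes the CRC16-CCITT checksum of key using the lookup table above.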
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go
new file mode 100644
index 0000000..c0b6396
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go
@@ -0,0 +1,71 @@
+package hashtag
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "hashtag")
+}
+
+var _ = Describe("CRC16", func() {
+ // http://redis.io/topics/cluster-spec#keys-distribution-model
+ It("should calculate CRC16", func() {
+ tests := []struct {
+ s string
+ n uint16
+ }{
+ {"123456789", 0x31C3},
+ {string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 21847},
+ }
+
+ for _, test := range tests {
+ Expect(crc16sum(test.s)).To(Equal(test.n), "for %s", test.s)
+ }
+ })
+})
+
+var _ = Describe("HashSlot", func() {
+ It("should calculate hash slots", func() {
+ tests := []struct {
+ key string
+ slot int
+ }{
+ {"123456789", 12739},
+ {"{}foo", 9500},
+ {"foo{}", 5542},
+ {"foo{}{bar}", 8363},
+ {"", 10503},
+ {"", 5176},
+ {string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 5463},
+ }
+ // Empty keys receive random slot.
+ rand.Seed(100)
+
+ for _, test := range tests {
+ Expect(Slot(test.key)).To(Equal(test.slot), "for %s", test.key)
+ }
+ })
+
+ It("should extract keys from tags", func() {
+ tests := []struct {
+ one, two string
+ }{
+ {"foo{bar}", "bar"},
+ {"{foo}bar", "foo"},
+ {"{user1000}.following", "{user1000}.followers"},
+ {"foo{{bar}}zap", "{bar"},
+ {"foo{bar}{zap}", "bar"},
+ }
+
+ for _, test := range tests {
+ Expect(Slot(test.one)).To(Equal(Slot(test.two)), "for %s <-> %s", test.one, test.two)
+ }
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go
new file mode 100644
index 0000000..852c8bd
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go
@@ -0,0 +1,201 @@
+package hscan
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+ // List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+ decoders = []decoderFunc{
+ reflect.Bool: decodeBool,
+ reflect.Int: decodeInt,
+ reflect.Int8: decodeInt8,
+ reflect.Int16: decodeInt16,
+ reflect.Int32: decodeInt32,
+ reflect.Int64: decodeInt64,
+ reflect.Uint: decodeUint,
+ reflect.Uint8: decodeUint8,
+ reflect.Uint16: decodeUint16,
+ reflect.Uint32: decodeUint32,
+ reflect.Uint64: decodeUint64,
+ reflect.Float32: decodeFloat32,
+ reflect.Float64: decodeFloat64,
+ reflect.Complex64: decodeUnsupported,
+ reflect.Complex128: decodeUnsupported,
+ reflect.Array: decodeUnsupported,
+ reflect.Chan: decodeUnsupported,
+ reflect.Func: decodeUnsupported,
+ reflect.Interface: decodeUnsupported,
+ reflect.Map: decodeUnsupported,
+ reflect.Ptr: decodeUnsupported,
+ reflect.Slice: decodeSlice,
+ reflect.String: decodeString,
+ reflect.Struct: decodeUnsupported,
+ reflect.UnsafePointer: decodeUnsupported,
+ }
+
+ // Global map of struct field specs that is populated once for every new
+ // struct type that is scanned. This caches the field types and the corresponding
+ // decoder functions to avoid iterating through struct fields on subsequent scans.
+ globalStructMap = newStructMap()
+)
+
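+// Struct checks that dst is a non-nil pointer to a struct and returns a
+// StructValue pairing the struct value with its cached field spec.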
+func Struct(dst interface{}) (StructValue, error) {
+ v := reflect.ValueOf(dst)
+
+ // The destination to scan into should be a struct pointer.
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+ }
+
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+ }
+
+ return StructValue{
+ spec: globalStructMap.get(v.Type()),
+ value: v,
+ }, nil
+}
+
+// Scan scans the results from a key-value Redis map result set to a destination struct.
+// The Redis keys are matched to the struct's field with the `redis` tag.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+ if len(keys) != len(vals) {
+ return errors.New("args should have the same number of keys and vals")
+ }
+
+ strct, err := Struct(dst)
+ if err != nil {
+ return err
+ }
+
+ // Iterate through the (key, value) sequence.
+ for i := 0; i < len(vals); i++ {
+ key, ok := keys[i].(string)
+ if !ok {
+ continue
+ }
+
+ val, ok := vals[i].(string)
+ if !ok {
+ continue
+ }
+
+ if err := strct.Scan(key, val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func decodeBool(f reflect.Value, s string) error {
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return err
+ }
+ f.SetBool(b)
+ return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+ v, err := strconv.ParseInt(s, 10, bitSize)
+ if err != nil {
+ return err
+ }
+ f.SetInt(v)
+ return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+ v, err := strconv.ParseUint(s, 10, bitSize)
+ if err != nil {
+ return err
+ }
+ f.SetUint(v)
+ return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+// Although float64 is the default, we define the decoder explicitly.
+func decodeFloat64(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+ f.SetString(s)
+ return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+ // []byte slice ([]uint8).
+ if f.Type().Elem().Kind() == reflect.Uint8 {
+ f.SetBytes([]byte(s))
+ }
+ return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+ return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
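
For orientation, a minimal sketch of how Scan is typically driven on an HGETALL-style reply. The Account struct and the field values are illustrative; hscan is an internal package, so this only compiles inside the go-redis module:

package hscan_test

import (
	"fmt"

	"github.com/go-redis/redis/v8/internal/hscan"
)

// Account is a hypothetical destination struct; fields are matched by `redis` tags.
type Account struct {
	Name  string `redis:"name"`
	Age   int    `redis:"age"`
	Admin bool   `redis:"admin"`
}

func ExampleScan() {
	// Keys and values as they would arrive from an HGETALL-style reply.
	keys := []interface{}{"name", "age", "admin"}
	vals := []interface{}{"alice", "42", "1"}

	var acc Account
	if err := hscan.Scan(&acc, keys, vals); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", acc)
	// Output: {Name:alice Age:42 Admin:true}
}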
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go
new file mode 100644
index 0000000..ab4c0e1
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go
@@ -0,0 +1,178 @@
+package hscan
+
+import (
+ "math"
+ "strconv"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+type data struct {
+ Omit string `redis:"-"`
+ Empty string
+
+ String string `redis:"string"`
+ Bytes []byte `redis:"byte"`
+ Int int `redis:"int"`
+ Int8 int8 `redis:"int8"`
+ Int16 int16 `redis:"int16"`
+ Int32 int32 `redis:"int32"`
+ Int64 int64 `redis:"int64"`
+ Uint uint `redis:"uint"`
+ Uint8 uint8 `redis:"uint8"`
+ Uint16 uint16 `redis:"uint16"`
+ Uint32 uint32 `redis:"uint32"`
+ Uint64 uint64 `redis:"uint64"`
+ Float float32 `redis:"float"`
+ Float64 float64 `redis:"float64"`
+ Bool bool `redis:"bool"`
+}
+
+type i []interface{}
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "hscan")
+}
+
+var _ = Describe("Scan", func() {
+ It("catches bad args", func() {
+ var d data
+
+ Expect(Scan(&d, i{}, i{})).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{}))
+
+ Expect(Scan(&d, i{"key"}, i{})).To(HaveOccurred())
+ Expect(Scan(&d, i{"key"}, i{"1", "2"})).To(HaveOccurred())
+ Expect(Scan(nil, i{"key", "1"}, i{})).To(HaveOccurred())
+
+ var m map[string]interface{}
+ Expect(Scan(&m, i{"key"}, i{"1"})).To(HaveOccurred())
+ Expect(Scan(data{}, i{"key"}, i{"1"})).To(HaveOccurred())
+ Expect(Scan(data{}, i{"key", "string"}, i{nil, nil})).To(HaveOccurred())
+ })
+
+ It("number out of range", func() {
+ f := func(v uint64) string {
+ return strconv.FormatUint(v, 10) + "1"
+ }
+ keys := i{"int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float", "float64"}
+ vals := i{
+ f(math.MaxInt8), f(math.MaxInt16), f(math.MaxInt32), f(math.MaxInt64),
+ f(math.MaxUint8), f(math.MaxUint16), f(math.MaxUint32), strconv.FormatUint(math.MaxUint64, 10) + "1",
+ "13.4028234663852886e+38", "11.79769313486231570e+308",
+ }
+ for k, v := range keys {
+ var d data
+ Expect(Scan(&d, i{v}, i{vals[k]})).To(HaveOccurred())
+ }
+
+ // success
+ f = func(v uint64) string {
+ return strconv.FormatUint(v, 10)
+ }
+ keys = i{"int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float", "float64"}
+ vals = i{
+ f(math.MaxInt8), f(math.MaxInt16), f(math.MaxInt32), f(math.MaxInt64),
+ f(math.MaxUint8), f(math.MaxUint16), f(math.MaxUint32), strconv.FormatUint(math.MaxUint64, 10),
+ "3.40282346638528859811704183484516925440e+38", "1.797693134862315708145274237317043567981e+308",
+ }
+ var d data
+ Expect(Scan(&d, keys, vals)).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{
+ Int8: math.MaxInt8,
+ Int16: math.MaxInt16,
+ Int32: math.MaxInt32,
+ Int64: math.MaxInt64,
+ Uint8: math.MaxUint8,
+ Uint16: math.MaxUint16,
+ Uint32: math.MaxUint32,
+ Uint64: math.MaxUint64,
+ Float: math.MaxFloat32,
+ Float64: math.MaxFloat64,
+ }))
+ })
+
+ It("scans good values", func() {
+ var d data
+
+ // non-tagged fields.
+ Expect(Scan(&d, i{"key"}, i{"value"})).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{}))
+
+ keys := i{"string", "byte", "int", "int64", "uint", "uint64", "float", "float64", "bool"}
+ vals := i{
+ "str!", "bytes!", "123", "123456789123456789", "456", "987654321987654321",
+ "123.456", "123456789123456789.987654321987654321", "1",
+ }
+ Expect(Scan(&d, keys, vals)).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{
+ String: "str!",
+ Bytes: []byte("bytes!"),
+ Int: 123,
+ Int64: 123456789123456789,
+ Uint: 456,
+ Uint64: 987654321987654321,
+ Float: 123.456,
+ Float64: 1.2345678912345678e+17,
+ Bool: true,
+ }))
+
+ // Scan a different type with the same values to test that
+ // the struct spec maps don't conflict.
+ type data2 struct {
+ String string `redis:"string"`
+ Bytes []byte `redis:"byte"`
+ Int int `redis:"int"`
+ Uint uint `redis:"uint"`
+ Float float32 `redis:"float"`
+ Bool bool `redis:"bool"`
+ }
+ var d2 data2
+ Expect(Scan(&d2, keys, vals)).NotTo(HaveOccurred())
+ Expect(d2).To(Equal(data2{
+ String: "str!",
+ Bytes: []byte("bytes!"),
+ Int: 123,
+ Uint: 456,
+ Float: 123.456,
+ Bool: true,
+ }))
+
+ Expect(Scan(&d, i{"string", "float", "bool"}, i{"", "1", "t"})).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{
+ String: "",
+ Bytes: []byte("bytes!"),
+ Int: 123,
+ Int64: 123456789123456789,
+ Uint: 456,
+ Uint64: 987654321987654321,
+ Float: 1.0,
+ Float64: 1.2345678912345678e+17,
+ Bool: true,
+ }))
+ })
+
+ It("omits untagged fields", func() {
+ var d data
+
+ Expect(Scan(&d, i{"empty", "omit", "string"}, i{"value", "value", "str!"})).NotTo(HaveOccurred())
+ Expect(d).To(Equal(data{
+ String: "str!",
+ }))
+ })
+
+ It("catches bad values", func() {
+ var d data
+
+ Expect(Scan(&d, i{"int"}, i{"a"})).To(HaveOccurred())
+ Expect(Scan(&d, i{"uint"}, i{"a"})).To(HaveOccurred())
+ Expect(Scan(&d, i{"uint"}, i{""})).To(HaveOccurred())
+ Expect(Scan(&d, i{"float"}, i{"b"})).To(HaveOccurred())
+ Expect(Scan(&d, i{"bool"}, i{"-1"})).To(HaveOccurred())
+ Expect(Scan(&d, i{"bool"}, i{""})).To(HaveOccurred())
+ Expect(Scan(&d, i{"bool"}, i{"123"})).To(HaveOccurred())
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go
new file mode 100644
index 0000000..6839412
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go
@@ -0,0 +1,93 @@
+package hscan
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+ m sync.Map
+}
+
+func newStructMap() *structMap {
+ return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+ if v, ok := s.m.Load(t); ok {
+ return v.(*structSpec)
+ }
+
+ spec := newStructSpec(t, "redis")
+ s.m.Store(t, spec)
+ return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+ m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+ s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+ numField := t.NumField()
+ out := &structSpec{
+ m: make(map[string]*structField, numField),
+ }
+
+ for i := 0; i < numField; i++ {
+ f := t.Field(i)
+
+ tag := f.Tag.Get(fieldTag)
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ tag = strings.Split(tag, ",")[0]
+ if tag == "" {
+ continue
+ }
+
+ // Use the built-in decoder.
+ out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+ }
+
+ return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+ index int
+ fn decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+ spec *structSpec
+ value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+ field, ok := s.spec.m[key]
+ if !ok {
+ return nil
+ }
+ if err := field.fn(s.value.Field(field.index), value); err != nil {
+ t := s.value.Type()
+ return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+ value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+ }
+ return nil
+}
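
A short sketch of how the cached spec is consumed field by field. The Item struct is hypothetical; note that only the part of the `redis` tag before the first comma is used as the lookup key, and tags "-" and "" are skipped entirely:

package hscan_test

import "github.com/go-redis/redis/v8/internal/hscan"

type Item struct {
	Title    string `redis:"title,omitempty"` // looked up under "title"
	Internal string `redis:"-"`               // skipped by newStructSpec
}

func scanItem() (Item, error) {
	var it Item
	sv, err := hscan.Struct(&it)
	if err != nil {
		return it, err
	}
	_ = sv.Scan("missing", "x")     // unknown keys are a no-op
	err = sv.Scan("title", "hello") // sets it.Title via the cached decoder
	return it, err
}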
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go
new file mode 100644
index 0000000..4a59c59
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ panic("not reached")
+ }
+ if minBackoff == 0 {
+ return 0
+ }
+
+ d := minBackoff << uint(retry)
+ if d < minBackoff {
+ return maxBackoff
+ }
+
+ d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+ if d > maxBackoff || d < minBackoff {
+ d = maxBackoff
+ }
+
+ return d
+}
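
An illustrative use of the backoff helper (the durations are arbitrary): the delay grows roughly exponentially per retry with random jitter and is clamped to maxBackoff. As internal code it is only callable from inside the module:

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v8/internal"
)

func main() {
	for retry := 0; retry < 6; retry++ {
		// Each delay is at least minBackoff, grows with minBackoff << retry
		// plus random jitter, and never exceeds maxBackoff.
		d := internal.RetryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond)
		fmt.Printf("retry %d: sleep %v\n", retry, d)
	}
}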
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go
new file mode 100644
index 0000000..bfdcbbb
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go
@@ -0,0 +1,18 @@
+package internal
+
+import (
+ "testing"
+ "time"
+
+ . "github.com/onsi/gomega"
+)
+
+func TestRetryBackoff(t *testing.T) {
+ RegisterTestingT(t)
+
+ for i := 0; i <= 16; i++ {
+ backoff := RetryBackoff(i, time.Millisecond, 512*time.Millisecond)
+ Expect(backoff >= 0).To(BeTrue())
+ Expect(backoff <= 512*time.Millisecond).To(BeTrue())
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go
new file mode 100644
index 0000000..c8b9213
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go
@@ -0,0 +1,26 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+)
+
+type Logging interface {
+ Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+ log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+ _ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Logger calls Output to print to stderr.
+// Arguments are handled in the manner of fmt.Print.
+var Logger Logging = &logger{
+ log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}
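
A sketch of swapping in a custom Logging implementation; the type name, prefix, and destination below are assumptions, the interface itself is the one defined above:

package main

import (
	"context"
	"log"
	"os"

	"github.com/go-redis/redis/v8/internal"
)

// verboseLogger is a hypothetical implementation of internal.Logging.
type verboseLogger struct {
	l *log.Logger
}

func (v verboseLogger) Printf(ctx context.Context, format string, args ...interface{}) {
	v.l.Printf(format, args...)
}

func main() {
	// Replace the package-level logger; all internal messages now go through it.
	internal.Logger = verboseLogger{l: log.New(os.Stdout, "redis-debug: ", log.LstdFlags)}
	internal.Logger.Printf(context.Background(), "pool created with %d conns", 10)
}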
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go
new file mode 100644
index 0000000..64f4627
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once. In other words, given
+// var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error. A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return nil
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ var err error
+ if o.done == 0 {
+ err = f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+ return err
+}
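
A small sketch of the re-arming behaviour: a failing f leaves the Once armed, so the next Do retries it; after the first nil return, f is never called again. The counter is purely illustrative:

package main

import (
	"errors"
	"fmt"

	"github.com/go-redis/redis/v8/internal"
)

func main() {
	var once internal.Once
	attempts := 0

	f := func() error {
		attempts++
		if attempts < 2 {
			return errors.New("temporary failure") // Once stays armed
		}
		return nil // marks the Once as done
	}

	fmt.Println(once.Do(f)) // temporary failure
	fmt.Println(once.Do(f)) // <nil>
	fmt.Println(once.Do(f)) // <nil> (f is not called again)
	fmt.Println(attempts)   // 2
}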
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go
new file mode 100644
index 0000000..dec5d3f
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go
@@ -0,0 +1,97 @@
+package pool_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+type poolGetPutBenchmark struct {
+ poolSize int
+}
+
+func (bm poolGetPutBenchmark) String() string {
+ return fmt.Sprintf("pool=%d", bm.poolSize)
+}
+
+func BenchmarkPoolGetPut(b *testing.B) {
+ ctx := context.Background()
+ benchmarks := []poolGetPutBenchmark{
+ {1},
+ {2},
+ {8},
+ {32},
+ {64},
+ {128},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.String(), func(b *testing.B) {
+ connPool := pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: bm.poolSize,
+ PoolTimeout: time.Second,
+ IdleTimeout: time.Hour,
+ IdleCheckFrequency: time.Hour,
+ })
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ cn, err := connPool.Get(ctx)
+ if err != nil {
+ b.Fatal(err)
+ }
+ connPool.Put(ctx, cn)
+ }
+ })
+ })
+ }
+}
+
+type poolGetRemoveBenchmark struct {
+ poolSize int
+}
+
+func (bm poolGetRemoveBenchmark) String() string {
+ return fmt.Sprintf("pool=%d", bm.poolSize)
+}
+
+func BenchmarkPoolGetRemove(b *testing.B) {
+ ctx := context.Background()
+ benchmarks := []poolGetRemoveBenchmark{
+ {1},
+ {2},
+ {8},
+ {32},
+ {64},
+ {128},
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.String(), func(b *testing.B) {
+ connPool := pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: bm.poolSize,
+ PoolTimeout: time.Second,
+ IdleTimeout: time.Hour,
+ IdleCheckFrequency: time.Hour,
+ })
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ cn, err := connPool.Get(ctx)
+ if err != nil {
+ b.Fatal(err)
+ }
+ connPool.Remove(ctx, cn, nil)
+ }
+ })
+ })
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go
new file mode 100644
index 0000000..5661659
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go
@@ -0,0 +1,121 @@
+package pool
+
+import (
+ "bufio"
+ "context"
+ "net"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+ usedAt int64 // atomic
+ netConn net.Conn
+
+ rd *proto.Reader
+ bw *bufio.Writer
+ wr *proto.Writer
+
+ Inited bool
+ pooled bool
+ createdAt time.Time
+}
+
+func NewConn(netConn net.Conn) *Conn {
+ cn := &Conn{
+ netConn: netConn,
+ createdAt: time.Now(),
+ }
+ cn.rd = proto.NewReader(netConn)
+ cn.bw = bufio.NewWriter(netConn)
+ cn.wr = proto.NewWriter(cn.bw)
+ cn.SetUsedAt(time.Now())
+ return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+ unix := atomic.LoadInt64(&cn.usedAt)
+ return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+ atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+ cn.netConn = netConn
+ cn.rd.Reset(netConn)
+ cn.bw.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+ return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+ if cn.netConn != nil {
+ return cn.netConn.RemoteAddr()
+ }
+ return nil
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
+ return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(
+ ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
+
+ if cn.bw.Buffered() > 0 {
+ cn.bw.Reset(cn.netConn)
+ }
+
+ if err := fn(cn.wr); err != nil {
+ return err
+ }
+
+ return cn.bw.Flush()
+}
+
+func (cn *Conn) Close() error {
+ return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+ tm := time.Now()
+ cn.SetUsedAt(tm)
+
+ if timeout > 0 {
+ tm = tm.Add(timeout)
+ }
+
+ if ctx != nil {
+ deadline, ok := ctx.Deadline()
+ if ok {
+ if timeout == 0 {
+ return deadline
+ }
+ if deadline.Before(tm) {
+ return deadline
+ }
+ return tm
+ }
+ }
+
+ if timeout > 0 {
+ return tm
+ }
+
+ return noDeadline
+}
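
A sketch of reading a reply from a pooled connection with a deadline. The net.Pipe endpoints stand in for a real server and the reply bytes are an assumption; the deadline is computed exactly as in Conn.deadline above:

package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/proto"
)

func main() {
	client, server := net.Pipe()
	go func() {
		server.Write([]byte("+PONG\r\n")) // fake RESP status reply
		server.Close()
	}()

	cn := pool.NewConn(client)
	defer cn.Close()

	// The read deadline is the earlier of ctx's deadline and now+timeout.
	err := cn.WithReader(context.Background(), time.Second, func(rd *proto.Reader) error {
		s, err := rd.ReadString()
		if err != nil {
			return err
		}
		fmt.Println(s) // PONG
		return nil
	})
	if err != nil {
		fmt.Println("read failed:", err)
	}
}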
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go
new file mode 100644
index 0000000..75dd4ad
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go
@@ -0,0 +1,9 @@
+package pool
+
+import (
+ "time"
+)
+
+func (cn *Conn) SetCreatedAt(tm time.Time) {
+ cn.createdAt = tm
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go
new file mode 100644
index 0000000..2365dbc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go
@@ -0,0 +1,36 @@
+package pool_test
+
+import (
+ "context"
+ "net"
+ "sync"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "pool")
+}
+
+func perform(n int, cbs ...func(int)) {
+ var wg sync.WaitGroup
+ for _, cb := range cbs {
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func(cb func(int), i int) {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ cb(i)
+ }(cb, i)
+ }
+ }
+ wg.Wait()
+}
+
+func dummyDialer(context.Context) (net.Conn, error) {
+ return &net.TCPConn{}, nil
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go
new file mode 100644
index 0000000..44a4e77
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go
@@ -0,0 +1,557 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+var (
+	// ErrClosed is returned when any operation is performed on a closed client.
+ ErrClosed = errors.New("redis: client is closed")
+
+	// ErrPoolTimeout is returned when the pool times out waiting for a free connection.
+ ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Hits uint32 // number of times free connection was found in the pool
+ Misses uint32 // number of times free connection was NOT found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // number of total connections in the pool
+ IdleConns uint32 // number of idle connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+ NewConn(context.Context) (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get(context.Context) (*Conn, error)
+ Put(context.Context, *Conn)
+ Remove(context.Context, *Conn, error)
+
+ Len() int
+ IdleLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func(context.Context) (net.Conn, error)
+ OnClose func(*Conn) error
+
+ PoolFIFO bool
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+type lastDialErrorWrap struct {
+ err error
+}
+
+type ConnPool struct {
+ opt *Options
+
+ dialErrorsNum uint32 // atomic
+
+ lastDialError atomic.Value
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+ poolSize int
+ idleConnsLen int
+
+ stats Stats
+
+ _closed uint32 // atomic
+ closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ opt: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ idleConns: make([]*Conn, 0, opt.PoolSize),
+ closedCh: make(chan struct{}),
+ }
+
+ p.connsMu.Lock()
+ p.checkMinIdleConns()
+ p.connsMu.Unlock()
+
+ if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+ go p.reaper(opt.IdleCheckFrequency)
+ }
+
+ return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+ if p.opt.MinIdleConns == 0 {
+ return
+ }
+ for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+ p.poolSize++
+ p.idleConnsLen++
+
+ go func() {
+ err := p.addIdleConn()
+ if err != nil && err != ErrClosed {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+ }()
+ }
+}
+
+func (p *ConnPool) addIdleConn() error {
+ cn, err := p.dialConn(context.TODO(), true)
+ if err != nil {
+ return err
+ }
+
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ // It is not allowed to add new connections to the closed connection pool.
+ if p.closed() {
+ _ = cn.Close()
+ return ErrClosed
+ }
+
+ p.conns = append(p.conns, cn)
+ p.idleConns = append(p.idleConns, cn)
+ return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ cn, err := p.dialConn(ctx, pooled)
+ if err != nil {
+ return nil, err
+ }
+
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ // It is not allowed to add new connections to the closed connection pool.
+ if p.closed() {
+ _ = cn.Close()
+ return nil, ErrClosed
+ }
+
+ p.conns = append(p.conns, cn)
+ if pooled {
+ // If pool is full remove the cn on next Put.
+ if p.poolSize >= p.opt.PoolSize {
+ cn.pooled = false
+ } else {
+ p.poolSize++
+ }
+ }
+
+ return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ return nil, p.getLastDialError()
+ }
+
+ netConn, err := p.opt.Dialer(ctx)
+ if err != nil {
+ p.setLastDialError(err)
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ go p.tryDial()
+ }
+ return nil, err
+ }
+
+ cn := NewConn(netConn)
+ cn.pooled = pooled
+ return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+ for {
+ if p.closed() {
+ return
+ }
+
+ conn, err := p.opt.Dialer(context.Background())
+ if err != nil {
+ p.setLastDialError(err)
+ time.Sleep(time.Second)
+ continue
+ }
+
+ atomic.StoreUint32(&p.dialErrorsNum, 0)
+ _ = conn.Close()
+ return
+ }
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+ p.lastDialError.Store(&lastDialErrorWrap{err: err})
+}
+
+func (p *ConnPool) getLastDialError() error {
+ err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+ if err != nil {
+ return err.err
+ }
+ return nil
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if err := p.waitTurn(ctx); err != nil {
+ return nil, err
+ }
+
+ for {
+ p.connsMu.Lock()
+ cn, err := p.popIdle()
+ p.connsMu.Unlock()
+
+ if err != nil {
+ return nil, err
+ }
+
+ if cn == nil {
+ break
+ }
+
+ if p.isStaleConn(cn) {
+ _ = p.CloseConn(cn)
+ continue
+ }
+
+ atomic.AddUint32(&p.stats.Hits, 1)
+ return cn, nil
+ }
+
+ atomic.AddUint32(&p.stats.Misses, 1)
+
+ newcn, err := p.newConn(ctx, true)
+ if err != nil {
+ p.freeTurn()
+ return nil, err
+ }
+
+ return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+ p.queue <- struct{}{}
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ select {
+ case p.queue <- struct{}{}:
+ return nil
+ default:
+ }
+
+ timer := timers.Get().(*time.Timer)
+ timer.Reset(p.opt.PoolTimeout)
+
+ select {
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return ctx.Err()
+ case p.queue <- struct{}{}:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return nil
+ case <-timer.C:
+ timers.Put(timer)
+ atomic.AddUint32(&p.stats.Timeouts, 1)
+ return ErrPoolTimeout
+ }
+}
+
+func (p *ConnPool) freeTurn() {
+ <-p.queue
+}
+
+func (p *ConnPool) popIdle() (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+ n := len(p.idleConns)
+ if n == 0 {
+ return nil, nil
+ }
+
+ var cn *Conn
+ if p.opt.PoolFIFO {
+ cn = p.idleConns[0]
+ copy(p.idleConns, p.idleConns[1:])
+ p.idleConns = p.idleConns[:n-1]
+ } else {
+ idx := n - 1
+ cn = p.idleConns[idx]
+ p.idleConns = p.idleConns[:idx]
+ }
+ p.idleConnsLen--
+ p.checkMinIdleConns()
+ return cn, nil
+}
+
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
+ if cn.rd.Buffered() > 0 {
+ internal.Logger.Printf(ctx, "Conn has unread data")
+ p.Remove(ctx, cn, BadConnError{})
+ return
+ }
+
+ if !cn.pooled {
+ p.Remove(ctx, cn, nil)
+ return
+ }
+
+ p.connsMu.Lock()
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ p.connsMu.Unlock()
+ p.freeTurn()
+}
+
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.removeConnWithLock(cn)
+ p.freeTurn()
+ _ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+ p.removeConnWithLock(cn)
+ return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+ p.connsMu.Lock()
+ p.removeConn(cn)
+ p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+ for i, c := range p.conns {
+ if c == cn {
+ p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ if cn.pooled {
+ p.poolSize--
+ p.checkMinIdleConns()
+ }
+ return
+ }
+ }
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+ if p.opt.OnClose != nil {
+ _ = p.opt.OnClose(cn)
+ }
+ return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+ p.connsMu.Lock()
+ n := len(p.conns)
+ p.connsMu.Unlock()
+ return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+ p.connsMu.Lock()
+ n := p.idleConnsLen
+ p.connsMu.Unlock()
+ return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+ idleLen := p.IdleLen()
+ return &Stats{
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Misses: atomic.LoadUint32(&p.stats.Misses),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+ TotalConns: uint32(p.Len()),
+ IdleConns: uint32(idleLen),
+ StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+ }
+}
+
+func (p *ConnPool) closed() bool {
+ return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ var firstErr error
+ for _, cn := range p.conns {
+ if fn(cn) {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ return firstErr
+}
+
+func (p *ConnPool) Close() error {
+ if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+ return ErrClosed
+ }
+ close(p.closedCh)
+
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ p.conns = nil
+ p.poolSize = 0
+ p.idleConns = nil
+ p.idleConnsLen = 0
+ p.connsMu.Unlock()
+
+ return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+			// The ticker and closedCh can become ready at the same time, and select
+			// picks among ready cases pseudo-randomly, so double-check that the pool
+			// is not closed before reaping.
+ if p.closed() {
+ return
+ }
+ _, err := p.ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
+ continue
+ }
+ case <-p.closedCh:
+ return
+ }
+ }
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+ var n int
+ for {
+ p.getTurn()
+
+ p.connsMu.Lock()
+ cn := p.reapStaleConn()
+ p.connsMu.Unlock()
+
+ p.freeTurn()
+
+ if cn != nil {
+ _ = p.closeConn(cn)
+ n++
+ } else {
+ break
+ }
+ }
+ atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+ return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ cn := p.idleConns[0]
+ if !p.isStaleConn(cn) {
+ return nil
+ }
+
+ p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+ p.idleConnsLen--
+ p.removeConn(cn)
+
+ return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+ if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ return false
+ }
+
+ now := time.Now()
+ if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+ return true
+ }
+ if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+ return true
+ }
+
+ return false
+}
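
A sketch of the pool's basic Get/Put life cycle. The address and the sizes are assumptions, and a reachable Redis server is assumed at localhost:6379; healthy connections go back with Put, broken ones with Remove:

package main

import (
	"context"
	"net"
	"time"

	"github.com/go-redis/redis/v8/internal/pool"
)

func main() {
	ctx := context.Background()

	connPool := pool.NewConnPool(&pool.Options{
		Dialer: func(ctx context.Context) (net.Conn, error) {
			return net.Dial("tcp", "localhost:6379")
		},
		PoolSize:           4,
		PoolTimeout:        time.Second,
		IdleTimeout:        5 * time.Minute,
		IdleCheckFrequency: time.Minute,
	})
	defer connPool.Close()

	cn, err := connPool.Get(ctx) // reuses an idle conn or dials a new one
	if err != nil {
		panic(err)
	}

	// ... use cn ...

	connPool.Put(ctx, cn) // return it; use Remove(ctx, cn, err) if it is broken
}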
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go
new file mode 100644
index 0000000..5a3fde1
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go
@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+ pool Pooler
+ cn *Conn
+ stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+ return &SingleConnPool{
+ pool: pool,
+ cn: cn,
+ }
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.stickyErr != nil {
+ return nil, p.stickyErr
+ }
+ return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.cn = nil
+ p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+ p.cn = nil
+ p.stickyErr = ErrClosed
+ return nil
+}
+
+func (p *SingleConnPool) Len() int {
+ return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+ return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go
new file mode 100644
index 0000000..3adb99b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go
@@ -0,0 +1,201 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+)
+
+const (
+ stateDefault = 0
+ stateInited = 1
+ stateClosed = 2
+)
+
+type BadConnError struct {
+ wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+ s := "redis: Conn is in a bad state"
+ if e.wrapped != nil {
+ s += ": " + e.wrapped.Error()
+ }
+ return s
+}
+
+func (e BadConnError) Unwrap() error {
+ return e.wrapped
+}
+
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+ pool Pooler
+ shared int32 // atomic
+
+ state uint32 // atomic
+ ch chan *Conn
+
+ _badConnError atomic.Value
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+ p, ok := pool.(*StickyConnPool)
+ if !ok {
+ p = &StickyConnPool{
+ pool: pool,
+ ch: make(chan *Conn, 1),
+ }
+ }
+ atomic.AddInt32(&p.shared, 1)
+ return p
+}
+
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+	// In the worst case this races with Close, which is not a very common operation.
+ for i := 0; i < 1000; i++ {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ cn, err := p.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+ return cn, nil
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ case stateInited:
+ if err := p.badConnError(); err != nil {
+ return nil, err
+ }
+ cn, ok := <-p.ch
+ if !ok {
+ return nil, ErrClosed
+ }
+ return cn, nil
+ case stateClosed:
+ return nil, ErrClosed
+ default:
+ panic("not reached")
+ }
+ }
+ return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
+}
+
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
+ defer func() {
+ if recover() != nil {
+ p.freeConn(ctx, cn)
+ }
+ }()
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
+ if err := p.badConnError(); err != nil {
+ p.pool.Remove(ctx, cn, err)
+ } else {
+ p.pool.Put(ctx, cn)
+ }
+}
+
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ defer func() {
+ if recover() != nil {
+ p.pool.Remove(ctx, cn, ErrClosed)
+ }
+ }()
+ p._badConnError.Store(BadConnError{wrapped: reason})
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) Close() error {
+ if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
+ return nil
+ }
+
+ for i := 0; i < 1000; i++ {
+ state := atomic.LoadUint32(&p.state)
+ if state == stateClosed {
+ return ErrClosed
+ }
+ if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+ close(p.ch)
+ cn, ok := <-p.ch
+ if ok {
+ p.freeConn(context.TODO(), cn)
+ }
+ return nil
+ }
+ }
+
+ return errors.New("redis: StickyConnPool.Close: infinite loop")
+}
+
+func (p *StickyConnPool) Reset(ctx context.Context) error {
+ if p.badConnError() == nil {
+ return nil
+ }
+
+ select {
+ case cn, ok := <-p.ch:
+ if !ok {
+ return ErrClosed
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ p._badConnError.Store(BadConnError{wrapped: nil})
+ default:
+ return errors.New("redis: StickyConnPool does not have a Conn")
+ }
+
+ if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+ state := atomic.LoadUint32(&p.state)
+ return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
+ }
+
+ return nil
+}
+
+func (p *StickyConnPool) badConnError() error {
+ if v := p._badConnError.Load(); v != nil {
+ if err := v.(BadConnError); err.wrapped != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *StickyConnPool) Len() int {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ return 0
+ case stateInited:
+ return 1
+ case stateClosed:
+ return 0
+ default:
+ panic("not reached")
+ }
+}
+
+func (p *StickyConnPool) IdleLen() int {
+ return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return &Stats{}
+}
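
A sketch of the sticky pool's single-connection contract, built on top of an existing Pooler. Here connPool and ctx are assumed to be the *pool.ConnPool and context from the previous sketch:

sticky := pool.NewStickyConnPool(connPool)

cn, err := sticky.Get(ctx) // first Get acquires a conn from the wrapped pool
if err != nil {
	panic(err)
}
sticky.Put(ctx, cn) // subsequent Gets receive this same connection

cn2, _ := sticky.Get(ctx) // cn2 == cn
sticky.Put(ctx, cn2)

_ = sticky.Close() // releases the conn back to the wrapped pool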
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go
new file mode 100644
index 0000000..423a783
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go
@@ -0,0 +1,458 @@
+package pool_test
+
+import (
+ "context"
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+var _ = Describe("ConnPool", func() {
+ ctx := context.Background()
+ var connPool *pool.ConnPool
+
+ BeforeEach(func() {
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ IdleTimeout: time.Millisecond,
+ IdleCheckFrequency: time.Millisecond,
+ })
+ })
+
+ AfterEach(func() {
+ connPool.Close()
+ })
+
+ It("should safe close", func() {
+ const minIdleConns = 10
+
+ var (
+ wg sync.WaitGroup
+ closedChan = make(chan struct{})
+ )
+ wg.Add(minIdleConns)
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ wg.Done()
+ <-closedChan
+ return &net.TCPConn{}, nil
+ },
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ IdleTimeout: time.Millisecond,
+ IdleCheckFrequency: time.Millisecond,
+ MinIdleConns: minIdleConns,
+ })
+ wg.Wait()
+ Expect(connPool.Close()).NotTo(HaveOccurred())
+ close(closedChan)
+
+		// Wait one second so that any in-flight checkMinIdleConns goroutines have finished.
+ time.Sleep(time.Second)
+
+ Expect(connPool.Stats()).To(Equal(&pool.Stats{
+ Hits: 0,
+ Misses: 0,
+ Timeouts: 0,
+ TotalConns: 0,
+ IdleConns: 0,
+ StaleConns: 0,
+ }))
+ })
+
+ It("should unblock client when conn is removed", func() {
+ // Reserve one connection.
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ // Reserve all other connections.
+ var cns []*pool.Conn
+ for i := 0; i < 9; i++ {
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ cns = append(cns, cn)
+ }
+
+ started := make(chan bool, 1)
+ done := make(chan bool, 1)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ _, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ done <- true
+
+ connPool.Put(ctx, cn)
+ }()
+ <-started
+
+ // Check that Get is blocked.
+ select {
+ case <-done:
+ Fail("Get is not blocked")
+ case <-time.After(time.Millisecond):
+ // ok
+ }
+
+ connPool.Remove(ctx, cn, nil)
+
+ // Check that Get is unblocked.
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("Get is not unblocked")
+ }
+
+ for _, cn := range cns {
+ connPool.Put(ctx, cn)
+ }
+ })
+})
+
+var _ = Describe("MinIdleConns", func() {
+ const poolSize = 100
+ ctx := context.Background()
+ var minIdleConns int
+ var connPool *pool.ConnPool
+
+ newConnPool := func() *pool.ConnPool {
+ connPool := pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: poolSize,
+ MinIdleConns: minIdleConns,
+ PoolTimeout: 100 * time.Millisecond,
+ IdleTimeout: -1,
+ IdleCheckFrequency: -1,
+ })
+ Eventually(func() int {
+ return connPool.Len()
+ }).Should(Equal(minIdleConns))
+ return connPool
+ }
+
+ assert := func() {
+ It("has idle connections when created", func() {
+ Expect(connPool.Len()).To(Equal(minIdleConns))
+ Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+ })
+
+ Context("after Get", func() {
+ var cn *pool.Conn
+
+ BeforeEach(func() {
+ var err error
+ cn, err = connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(func() int {
+ return connPool.Len()
+ }).Should(Equal(minIdleConns + 1))
+ })
+
+ It("has idle connections", func() {
+ Expect(connPool.Len()).To(Equal(minIdleConns + 1))
+ Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+ })
+
+ Context("after Remove", func() {
+ BeforeEach(func() {
+ connPool.Remove(ctx, cn, nil)
+ })
+
+ It("has idle connections", func() {
+ Expect(connPool.Len()).To(Equal(minIdleConns))
+ Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+ })
+ })
+ })
+
+ Describe("Get does not exceed pool size", func() {
+ var mu sync.RWMutex
+ var cns []*pool.Conn
+
+ BeforeEach(func() {
+ cns = make([]*pool.Conn, 0)
+
+ perform(poolSize, func(_ int) {
+ defer GinkgoRecover()
+
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ mu.Lock()
+ cns = append(cns, cn)
+ mu.Unlock()
+ })
+
+ Eventually(func() int {
+ return connPool.Len()
+ }).Should(BeNumerically(">=", poolSize))
+ })
+
+ It("Get is blocked", func() {
+ done := make(chan struct{})
+ go func() {
+ connPool.Get(ctx)
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ Fail("Get is not blocked")
+ case <-time.After(time.Millisecond):
+ // ok
+ }
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("Get is not unblocked")
+ }
+ })
+
+ Context("after Put", func() {
+ BeforeEach(func() {
+ perform(len(cns), func(i int) {
+ mu.RLock()
+ connPool.Put(ctx, cns[i])
+ mu.RUnlock()
+ })
+
+ Eventually(func() int {
+ return connPool.Len()
+ }).Should(Equal(poolSize))
+ })
+
+ It("pool.Len is back to normal", func() {
+ Expect(connPool.Len()).To(Equal(poolSize))
+ Expect(connPool.IdleLen()).To(Equal(poolSize))
+ })
+ })
+
+ Context("after Remove", func() {
+ BeforeEach(func() {
+ perform(len(cns), func(i int) {
+ mu.RLock()
+ connPool.Remove(ctx, cns[i], nil)
+ mu.RUnlock()
+ })
+
+ Eventually(func() int {
+ return connPool.Len()
+ }).Should(Equal(minIdleConns))
+ })
+
+ It("has idle connections", func() {
+ Expect(connPool.Len()).To(Equal(minIdleConns))
+ Expect(connPool.IdleLen()).To(Equal(minIdleConns))
+ })
+ })
+ })
+ }
+
+ Context("minIdleConns = 1", func() {
+ BeforeEach(func() {
+ minIdleConns = 1
+ connPool = newConnPool()
+ })
+
+ AfterEach(func() {
+ connPool.Close()
+ })
+
+ assert()
+ })
+
+ Context("minIdleConns = 32", func() {
+ BeforeEach(func() {
+ minIdleConns = 32
+ connPool = newConnPool()
+ })
+
+ AfterEach(func() {
+ connPool.Close()
+ })
+
+ assert()
+ })
+})
+
+var _ = Describe("conns reaper", func() {
+ const idleTimeout = time.Minute
+ const maxAge = time.Hour
+
+ ctx := context.Background()
+ var connPool *pool.ConnPool
+ var conns, staleConns, closedConns []*pool.Conn
+
+ assert := func(typ string) {
+ BeforeEach(func() {
+ closedConns = nil
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ IdleTimeout: idleTimeout,
+ MaxConnAge: maxAge,
+ PoolTimeout: time.Second,
+ IdleCheckFrequency: time.Hour,
+ OnClose: func(cn *pool.Conn) error {
+ closedConns = append(closedConns, cn)
+ return nil
+ },
+ })
+
+ conns = nil
+
+ // add stale connections
+ staleConns = nil
+ for i := 0; i < 3; i++ {
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ switch typ {
+ case "idle":
+ cn.SetUsedAt(time.Now().Add(-2 * idleTimeout))
+ case "aged":
+ cn.SetCreatedAt(time.Now().Add(-2 * maxAge))
+ }
+ conns = append(conns, cn)
+ staleConns = append(staleConns, cn)
+ }
+
+ // add fresh connections
+ for i := 0; i < 3; i++ {
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ conns = append(conns, cn)
+ }
+
+ for _, cn := range conns {
+ connPool.Put(ctx, cn)
+ }
+
+ Expect(connPool.Len()).To(Equal(6))
+ Expect(connPool.IdleLen()).To(Equal(6))
+
+ n, err := connPool.ReapStaleConns()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(3))
+ })
+
+ AfterEach(func() {
+ _ = connPool.Close()
+ Expect(connPool.Len()).To(Equal(0))
+ Expect(connPool.IdleLen()).To(Equal(0))
+ Expect(len(closedConns)).To(Equal(len(conns)))
+ Expect(closedConns).To(ConsistOf(conns))
+ })
+
+ It("reaps stale connections", func() {
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.IdleLen()).To(Equal(3))
+ })
+
+ It("does not reap fresh connections", func() {
+ n, err := connPool.ReapStaleConns()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(0))
+ })
+
+ It("stale connections are closed", func() {
+ Expect(len(closedConns)).To(Equal(len(staleConns)))
+ Expect(closedConns).To(ConsistOf(staleConns))
+ })
+
+ It("pool is functional", func() {
+ for j := 0; j < 3; j++ {
+ var freeCns []*pool.Conn
+ for i := 0; i < 3; i++ {
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn).NotTo(BeNil())
+ freeCns = append(freeCns, cn)
+ }
+
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.IdleLen()).To(Equal(0))
+
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn).NotTo(BeNil())
+ conns = append(conns, cn)
+
+ Expect(connPool.Len()).To(Equal(4))
+ Expect(connPool.IdleLen()).To(Equal(0))
+
+ connPool.Remove(ctx, cn, nil)
+
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.IdleLen()).To(Equal(0))
+
+ for _, cn := range freeCns {
+ connPool.Put(ctx, cn)
+ }
+
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.IdleLen()).To(Equal(3))
+ }
+ })
+ }
+
+ assert("idle")
+ assert("aged")
+})
+
+var _ = Describe("race", func() {
+ ctx := context.Background()
+ var connPool *pool.ConnPool
+ var C, N int
+
+ BeforeEach(func() {
+ C, N = 10, 1000
+ if testing.Short() {
+ C = 4
+ N = 100
+ }
+ })
+
+ AfterEach(func() {
+ connPool.Close()
+ })
+
+ It("does not happen on Get, Put, and Remove", func() {
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Minute,
+ IdleTimeout: time.Millisecond,
+ IdleCheckFrequency: time.Millisecond,
+ })
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ if err == nil {
+ connPool.Put(ctx, cn)
+ }
+ }
+ }, func(id int) {
+ for i := 0; i < N; i++ {
+ cn, err := connPool.Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ if err == nil {
+ connPool.Remove(ctx, cn, nil)
+ }
+ }
+ })
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go
new file mode 100644
index 0000000..c9a820e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go
@@ -0,0 +1,13 @@
+package proto_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "proto")
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go
new file mode 100644
index 0000000..0e6ca77
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go
@@ -0,0 +1,332 @@
+package proto
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+// Redis RESP protocol data types.
+const (
+ ErrorReply = '-'
+ StatusReply = '+'
+ IntReply = ':'
+ StringReply = '$'
+ ArrayReply = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil") // nolint:errname
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+ rd *bufio.Reader
+ _buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ _buf: make([]byte, 64),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if isNilReply(line) {
+ return nil, Nil
+ }
+ return line, nil
+}
+
+// readLine reads a line, returning an error if:
+// - there is a pending read error;
+// - or line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...) //nolint:makezero
+ b = full
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ case StringReply:
+ return r.readStringReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+ return nil, err
+ }
+ return m(r, n)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ default:
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return "", ParseErrorReply(line)
+ case StringReply:
+ return r.readStringReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return string(line[1:]), nil
+ default:
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+ }
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ if isNilReply(line) {
+ return "", Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, replyLen+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ return m(r, n)
+ default:
+ return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadArrayLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return 0, err
+ }
+ return int(n), nil
+ default:
+ return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != 2 {
+ return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+ }
+
+ cursor, err := r.ReadUint()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ n, err = r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ keys := make([]string, n)
+
+ for i := 0; i < n; i++ {
+ key, err := r.ReadString()
+ if err != nil {
+ return nil, 0, err
+ }
+ keys[i] = key
+ }
+
+ return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StringReply:
+ return r._readTmpBytesReply(line)
+ case StatusReply:
+ return line[1:], nil
+ default:
+ return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ }
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+ if isNilReply(line) {
+ return nil, Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ buf := r.buf(replyLen + 2)
+ _, err = io.ReadFull(r.rd, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+ if n <= cap(r._buf) {
+ return r._buf[:n]
+ }
+ d := n - cap(r._buf)
+ r._buf = append(r._buf, make([]byte, d)...)
+ return r._buf
+}
+
+func isNilReply(b []byte) bool {
+ return len(b) == 3 &&
+ (b[0] == StringReply || b[0] == ArrayReply) &&
+ b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+ if isNilReply(line) {
+ return 0, Nil
+ }
+ return util.ParseInt(line[1:], 10, 64)
+}
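
A sketch of parsing raw RESP replies with the reader; the byte stream below (a bulk string followed by an integer) is an assumption standing in for data read from a server:

package main

import (
	"fmt"
	"strings"

	"github.com/go-redis/redis/v8/internal/proto"
)

func main() {
	rd := proto.NewReader(strings.NewReader("$5\r\nhello\r\n:42\r\n"))

	s, err := rd.ReadString() // "hello"
	if err != nil {
		panic(err)
	}
	n, err := rd.ReadIntReply() // 42
	if err != nil {
		panic(err)
	}
	fmt.Println(s, n)
}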
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go
new file mode 100644
index 0000000..b8c99dd
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go
@@ -0,0 +1,72 @@
+package proto_test
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+func BenchmarkReader_ParseReply_Status(b *testing.B) {
+ benchmarkParseReply(b, "+OK\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Int(b *testing.B) {
+ benchmarkParseReply(b, ":1\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Error(b *testing.B) {
+ benchmarkParseReply(b, "-Error message\r\n", nil, true)
+}
+
+func BenchmarkReader_ParseReply_String(b *testing.B) {
+ benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Slice(b *testing.B) {
+ benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", multiBulkParse, false)
+}
+
+func TestReader_ReadLine(t *testing.T) {
+ original := bytes.Repeat([]byte("a"), 8192)
+ original[len(original)-2] = '\r'
+ original[len(original)-1] = '\n'
+ r := proto.NewReader(bytes.NewReader(original))
+ read, err := r.ReadLine()
+ if err != nil && err != io.EOF {
+ t.Errorf("Should be able to read the full buffer: %v", err)
+ }
+
+	if !bytes.Equal(read, original[:len(original)-2]) {
+ t.Errorf("Values must be equal: %d expected %d", len(read), len(original[:len(original)-2]))
+ }
+}
+
+func benchmarkParseReply(b *testing.B, reply string, m proto.MultiBulkParse, wanterr bool) {
+ buf := new(bytes.Buffer)
+ for i := 0; i < b.N; i++ {
+ buf.WriteString(reply)
+ }
+ p := proto.NewReader(buf)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := p.ReadReply(m)
+ if !wanterr && err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func multiBulkParse(p *proto.Reader, n int64) (interface{}, error) {
+ vv := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := p.ReadReply(multiBulkParse)
+ if err != nil {
+ return nil, err
+ }
+ vv = append(vv, v)
+ }
+ return vv, nil
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go
new file mode 100644
index 0000000..0e99476
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go
@@ -0,0 +1,180 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+// Scan parses bytes `b` into `v`, converting them to the appropriate type.
+//nolint:gocyclo
+func Scan(b []byte, v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return fmt.Errorf("redis: Scan(nil)")
+ case *string:
+ *v = util.BytesToString(b)
+ return nil
+ case *[]byte:
+ *v = b
+ return nil
+ case *int:
+ var err error
+ *v, err = util.Atoi(b)
+ return err
+ case *int8:
+ n, err := util.ParseInt(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = int8(n)
+ return nil
+ case *int16:
+ n, err := util.ParseInt(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = int16(n)
+ return nil
+ case *int32:
+ n, err := util.ParseInt(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = int32(n)
+ return nil
+ case *int64:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *uint:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = uint(n)
+ return nil
+ case *uint8:
+ n, err := util.ParseUint(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = uint8(n)
+ return nil
+ case *uint16:
+ n, err := util.ParseUint(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = uint16(n)
+ return nil
+ case *uint32:
+ n, err := util.ParseUint(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = uint32(n)
+ return nil
+ case *uint64:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *float32:
+ n, err := util.ParseFloat(b, 32)
+ if err != nil {
+ return err
+ }
+ *v = float32(n)
+ return err
+ case *float64:
+ var err error
+ *v, err = util.ParseFloat(b, 64)
+ return err
+ case *bool:
+ *v = len(b) == 1 && b[0] == '1'
+ return nil
+ case *time.Time:
+ var err error
+ *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+ return err
+ case *time.Duration:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = time.Duration(n)
+ return nil
+ case encoding.BinaryUnmarshaler:
+ return v.UnmarshalBinary(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+ }
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+ v := reflect.ValueOf(slice)
+ if !v.IsValid() {
+ return fmt.Errorf("redis: ScanSlice(nil)")
+ }
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+ }
+
+ next := makeSliceNextElemFunc(v)
+ for i, s := range data {
+ elem := next()
+ if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+ elemType := v.Type().Elem()
+
+ if elemType.Kind() == reflect.Ptr {
+ elemType = elemType.Elem()
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ elem := v.Index(v.Len() - 1)
+ if elem.IsNil() {
+ elem.Set(reflect.New(elemType))
+ }
+ return elem.Elem()
+ }
+
+ elem := reflect.New(elemType)
+ v.Set(reflect.Append(v, elem))
+ return elem.Elem()
+ }
+ }
+
+ zero := reflect.Zero(elemType)
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ return v.Index(v.Len() - 1)
+ }
+
+ v.Set(reflect.Append(v, zero))
+ return v.Index(v.Len() - 1)
+ }
+}
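A minimal sketch of how the Scan helper above converts raw reply bytes into Go values. proto is an internal package, so this only compiles from within the module; the input values and the example name are illustrative.

package proto_test

import (
    "fmt"
    "time"

    "github.com/go-redis/redis/v8/internal/proto"
)

func ExampleScan() {
    var n int64
    _ = proto.Scan([]byte("42"), &n)

    var ok bool
    _ = proto.Scan([]byte("1"), &ok) // a single '1' byte maps to true

    var ts time.Time
    _ = proto.Scan([]byte("2021-03-25T12:00:00Z"), &ts) // parsed as RFC3339Nano

    fmt.Println(n, ok, ts.Format(time.RFC3339))
    // Output: 42 true 2021-03-25T12:00:00Z
}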
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go
new file mode 100644
index 0000000..55df550
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go
@@ -0,0 +1,50 @@
+package proto_test
+
+import (
+ "encoding/json"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+type testScanSliceStruct struct {
+ ID int
+ Name string
+}
+
+func (s *testScanSliceStruct) MarshalBinary() ([]byte, error) {
+ return json.Marshal(s)
+}
+
+func (s *testScanSliceStruct) UnmarshalBinary(b []byte) error {
+ return json.Unmarshal(b, s)
+}
+
+var _ = Describe("ScanSlice", func() {
+ data := []string{
+ `{"ID":-1,"Name":"Back Yu"}`,
+ `{"ID":1,"Name":"szyhf"}`,
+ }
+
+ It("[]testScanSliceStruct", func() {
+ var slice []testScanSliceStruct
+ err := proto.ScanSlice(data, &slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Equal([]testScanSliceStruct{
+ {-1, "Back Yu"},
+ {1, "szyhf"},
+ }))
+ })
+
+ It("var testContainer []*testScanSliceStruct", func() {
+ var slice []*testScanSliceStruct
+ err := proto.ScanSlice(data, &slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Equal([]*testScanSliceStruct{
+ {-1, "Back Yu"},
+ {1, "szyhf"},
+ }))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go
new file mode 100644
index 0000000..c426098
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go
@@ -0,0 +1,155 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ // io.StringWriter
+ WriteString(s string) (n int, err error)
+}
+
+type Writer struct {
+ writer
+
+ lenBuf []byte
+ numBuf []byte
+}
+
+func NewWriter(wr writer) *Writer {
+ return &Writer{
+ writer: wr,
+
+ lenBuf: make([]byte, 64),
+ numBuf: make([]byte, 64),
+ }
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+ if err := w.WriteByte(ArrayReply); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(args)); err != nil {
+ return err
+ }
+
+ for _, arg := range args {
+ if err := w.WriteArg(arg); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+ w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+ w.lenBuf = append(w.lenBuf, '\r', '\n')
+ _, err := w.Write(w.lenBuf)
+ return err
+}
+
+func (w *Writer) WriteArg(v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return w.string("")
+ case string:
+ return w.string(v)
+ case []byte:
+ return w.bytes(v)
+ case int:
+ return w.int(int64(v))
+ case int8:
+ return w.int(int64(v))
+ case int16:
+ return w.int(int64(v))
+ case int32:
+ return w.int(int64(v))
+ case int64:
+ return w.int(v)
+ case uint:
+ return w.uint(uint64(v))
+ case uint8:
+ return w.uint(uint64(v))
+ case uint16:
+ return w.uint(uint64(v))
+ case uint32:
+ return w.uint(uint64(v))
+ case uint64:
+ return w.uint(v)
+ case float32:
+ return w.float(float64(v))
+ case float64:
+ return w.float(v)
+ case bool:
+ if v {
+ return w.int(1)
+ }
+ return w.int(0)
+ case time.Time:
+ w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+ return w.bytes(w.numBuf)
+ case time.Duration:
+ return w.int(v.Nanoseconds())
+ case encoding.BinaryMarshaler:
+ b, err := v.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ return w.bytes(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+ }
+}
+
+func (w *Writer) bytes(b []byte) error {
+ if err := w.WriteByte(StringReply); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(b)); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+
+ return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+ return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+ w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+ w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+ w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+ if err := w.WriteByte('\r'); err != nil {
+ return err
+ }
+ return w.WriteByte('\n')
+}
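A short sketch of the RESP encoding produced by the Writer above. Again an internal package, so the snippet only compiles from within the module; the command and buffer are illustrative.

package proto_test

import (
    "bytes"
    "fmt"

    "github.com/go-redis/redis/v8/internal/proto"
)

func ExampleWriter_WriteArgs() {
    buf := new(bytes.Buffer)
    wr := proto.NewWriter(buf)

    // Each argument becomes a RESP bulk string after the array header.
    if err := wr.WriteArgs([]interface{}{"SET", "key", "value"}); err != nil {
        panic(err)
    }

    fmt.Printf("%q\n", buf.String())
    // Output: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
}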
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go
new file mode 100644
index 0000000..ebae569
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go
@@ -0,0 +1,93 @@
+package proto_test
+
+import (
+ "bytes"
+ "encoding"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+type MyType struct{}
+
+var _ encoding.BinaryMarshaler = (*MyType)(nil)
+
+func (t *MyType) MarshalBinary() ([]byte, error) {
+ return []byte("hello"), nil
+}
+
+var _ = Describe("WriteBuffer", func() {
+ var buf *bytes.Buffer
+ var wr *proto.Writer
+
+ BeforeEach(func() {
+ buf = new(bytes.Buffer)
+ wr = proto.NewWriter(buf)
+ })
+
+ It("should write args", func() {
+ err := wr.WriteArgs([]interface{}{
+ "string",
+ 12,
+ 34.56,
+ []byte{'b', 'y', 't', 'e', 's'},
+ true,
+ nil,
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
+ "$6\r\nstring\r\n" +
+ "$2\r\n12\r\n" +
+ "$5\r\n34.56\r\n" +
+ "$5\r\nbytes\r\n" +
+ "$1\r\n1\r\n" +
+ "$0\r\n" +
+ "\r\n")))
+ })
+
+ It("should append time", func() {
+ tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
+ err := wr.WriteArgs([]interface{}{tm})
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Len()).To(Equal(41))
+ })
+
+ It("should append marshalable args", func() {
+ err := wr.WriteArgs([]interface{}{&MyType{}})
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Len()).To(Equal(15))
+ })
+})
+
+type discard struct{}
+
+func (discard) Write(b []byte) (int, error) {
+ return len(b), nil
+}
+
+func (discard) WriteString(s string) (int, error) {
+ return len(s), nil
+}
+
+func (discard) WriteByte(c byte) error {
+ return nil
+}
+
+func BenchmarkWriteBuffer_Append(b *testing.B) {
+ buf := proto.NewWriter(discard{})
+ args := []interface{}{"hello", "world", "foo", "bar"}
+
+ for i := 0; i < b.N; i++ {
+ err := buf.WriteArgs(args)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go
new file mode 100644
index 0000000..2edccba
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go
@@ -0,0 +1,50 @@
+package rand
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+ src rand.Source
+ mu sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+ s.mu.Lock()
+ n := s.src.Int63()
+ s.mu.Unlock()
+ return n
+}
+
+func (s *source) Seed(seed int64) {
+ s.mu.Lock()
+ s.src.Seed(seed)
+ s.mu.Unlock()
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go
new file mode 100644
index 0000000..fd2f434
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go
@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package internal
+
+func String(b []byte) string {
+ return string(b)
+}
+
+func Bytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go
new file mode 100644
index 0000000..9f2e418
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go
@@ -0,0 +1,21 @@
+//go:build !appengine
+// +build !appengine
+
+package internal
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go
new file mode 100644
index 0000000..e34a7f0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go
@@ -0,0 +1,46 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go
new file mode 100644
index 0000000..2130711
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go
@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+ return string(b)
+}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go
new file mode 100644
index 0000000..db50338
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go
@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+ return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+ return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+ return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+ return strconv.ParseFloat(BytesToString(b), bitSize)
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go
new file mode 100644
index 0000000..daa8d76
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go
@@ -0,0 +1,23 @@
+//go:build !appengine
+// +build !appengine
+
+package util
+
+import (
+ "unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
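The conversions above avoid an allocation by sharing the backing array, which is why they sit behind the appengine build-tag split. A hedged sketch of the trade-off (internal package; the util_test placement and values are illustrative):

package util_test

import (
    "fmt"

    "github.com/go-redis/redis/v8/internal/util"
)

func ExampleBytesToString() {
    b := []byte("hello")

    // Zero-copy conversion: s shares b's backing array, so b must not be
    // modified while s is in use. The appengine (safe.go) variant copies.
    s := util.BytesToString(b)

    fmt.Println(s)
    // Output: hello
}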
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go
new file mode 100644
index 0000000..b1dd0bd
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go
@@ -0,0 +1,67 @@
+package redis
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("newClusterState", func() {
+ var state *clusterState
+
+ createClusterState := func(slots []ClusterSlot) *clusterState {
+ opt := &ClusterOptions{}
+ opt.init()
+ nodes := newClusterNodes(opt)
+ state, err := newClusterState(nodes, slots, "10.10.10.10:1234")
+ Expect(err).NotTo(HaveOccurred())
+ return state
+ }
+
+ Describe("sorting", func() {
+ BeforeEach(func() {
+ state = createClusterState([]ClusterSlot{{
+ Start: 1000,
+ End: 1999,
+ }, {
+ Start: 0,
+ End: 999,
+ }, {
+ Start: 2000,
+ End: 2999,
+ }})
+ })
+
+ It("sorts slots", func() {
+ Expect(state.slots).To(Equal([]*clusterSlot{
+ {start: 0, end: 999, nodes: nil},
+ {start: 1000, end: 1999, nodes: nil},
+ {start: 2000, end: 2999, nodes: nil},
+ }))
+ })
+ })
+
+ Describe("loopback", func() {
+ BeforeEach(func() {
+ state = createClusterState([]ClusterSlot{{
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: ":1234"}},
+ }})
+ })
+
+ It("replaces loopback hosts in addresses", func() {
+ slotAddr := func(slot *clusterSlot) string {
+ return slot.nodes[0].Client.Options().Addr
+ }
+
+ Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001"))
+ Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002"))
+ Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234"))
+ Expect(slotAddr(state.slots[3])).To(Equal(":1234"))
+ })
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go
new file mode 100644
index 0000000..2f8bc2b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go
@@ -0,0 +1,77 @@
+package redis
+
+import (
+ "context"
+ "sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+ mu sync.Mutex // protects cmd and pos
+ cmd *ScanCmd
+ pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+ it.mu.Lock()
+ err := it.cmd.Err()
+ it.mu.Unlock()
+ return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next(ctx context.Context) bool {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ // Return immediately on errors.
+ if it.cmd.Err() != nil {
+ return false
+ }
+
+ // Advance cursor, check if we are still within range.
+ if it.pos < len(it.cmd.page) {
+ it.pos++
+ return true
+ }
+
+ for {
+ // Return if there is no more data to fetch.
+ if it.cmd.cursor == 0 {
+ return false
+ }
+
+ // Fetch next page.
+ switch it.cmd.args[0] {
+ case "scan", "qscan":
+ it.cmd.args[1] = it.cmd.cursor
+ default:
+ it.cmd.args[2] = it.cmd.cursor
+ }
+
+ err := it.cmd.process(ctx, it.cmd)
+ if err != nil {
+ return false
+ }
+
+ it.pos = 1
+
+ // Redis can occasionally return an empty page.
+ if len(it.cmd.page) > 0 {
+ return true
+ }
+ }
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+ var v string
+ it.mu.Lock()
+ if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+ v = it.cmd.page[it.pos-1]
+ }
+ it.mu.Unlock()
+ return v
+}
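A minimal sketch of driving the ScanIterator above across SCAN pages; it assumes a reachable server at localhost:6379, and the key pattern is illustrative.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/go-redis/redis/v8"
)

func main() {
    ctx := context.Background()
    rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    // The iterator fetches the next SCAN page transparently whenever the
    // current page is exhausted.
    iter := rdb.Scan(ctx, 0, "user:*", 100).Iterator()
    for iter.Next(ctx) {
        fmt.Println("key:", iter.Val())
    }
    if err := iter.Err(); err != nil {
        log.Fatal(err)
    }
}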
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go
new file mode 100644
index 0000000..68c8b77
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go
@@ -0,0 +1,136 @@
+package redis_test
+
+import (
+ "fmt"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("ScanIterator", func() {
+ var client *redis.Client
+
+ seed := func(n int) error {
+ pipe := client.Pipeline()
+ for i := 1; i <= n; i++ {
+ pipe.Set(ctx, fmt.Sprintf("K%02d", i), "x", 0).Err()
+ }
+ _, err := pipe.Exec(ctx)
+ return err
+ }
+
+ extraSeed := func(n int, m int) error {
+ pipe := client.Pipeline()
+ for i := 1; i <= m; i++ {
+ pipe.Set(ctx, fmt.Sprintf("A%02d", i), "x", 0).Err()
+ }
+ for i := 1; i <= n; i++ {
+ pipe.Set(ctx, fmt.Sprintf("K%02d", i), "x", 0).Err()
+ }
+ _, err := pipe.Exec(ctx)
+ return err
+ }
+
+ hashKey := "K_HASHTEST"
+ hashSeed := func(n int) error {
+ pipe := client.Pipeline()
+ for i := 1; i <= n; i++ {
+ pipe.HSet(ctx, hashKey, fmt.Sprintf("K%02d", i), "x").Err()
+ }
+ _, err := pipe.Exec(ctx)
+ return err
+ }
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should scan across empty DBs", func() {
+ iter := client.Scan(ctx, 0, "", 10).Iterator()
+ Expect(iter.Next(ctx)).To(BeFalse())
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ })
+
+ It("should scan across one page", func() {
+ Expect(seed(7)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(ctx, 0, "", 0).Iterator()
+ for iter.Next(ctx) {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(ConsistOf([]string{"K01", "K02", "K03", "K04", "K05", "K06", "K07"}))
+ })
+
+ It("should scan across multiple pages", func() {
+ Expect(seed(71)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(ctx, 0, "", 10).Iterator()
+ for iter.Next(ctx) {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(71))
+ Expect(vals).To(ContainElement("K01"))
+ Expect(vals).To(ContainElement("K71"))
+ })
+
+ It("should hscan across multiple pages", func() {
+ Expect(hashSeed(71)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.HScan(ctx, hashKey, 0, "", 10).Iterator()
+ for iter.Next(ctx) {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(71 * 2))
+ Expect(vals).To(ContainElement("K01"))
+ Expect(vals).To(ContainElement("K71"))
+ })
+
+ It("should scan to page borders", func() {
+ Expect(seed(20)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(ctx, 0, "", 10).Iterator()
+ for iter.Next(ctx) {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(20))
+ })
+
+ It("should scan with match", func() {
+ Expect(seed(33)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(ctx, 0, "K*2*", 10).Iterator()
+ for iter.Next(ctx) {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(13))
+ })
+
+ It("should scan with match across empty pages", func() {
+ Expect(extraSeed(2, 10)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(ctx, 0, "K*", 1).Iterator()
+ for iter.Next(ctx) {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(2))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go
new file mode 100644
index 0000000..5414310
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go
@@ -0,0 +1,448 @@
+package redis_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+const (
+ redisPort = "6380"
+ redisAddr = ":" + redisPort
+ redisSecondaryPort = "6381"
+)
+
+const (
+ ringShard1Port = "6390"
+ ringShard2Port = "6391"
+ ringShard3Port = "6392"
+)
+
+const (
+ sentinelName = "mymaster"
+ sentinelMasterPort = "9123"
+ sentinelSlave1Port = "9124"
+ sentinelSlave2Port = "9125"
+ sentinelPort1 = "9126"
+ sentinelPort2 = "9127"
+ sentinelPort3 = "9128"
+)
+
+var (
+ sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
+
+ processes map[string]*redisProcess
+
+ redisMain *redisProcess
+ ringShard1, ringShard2, ringShard3 *redisProcess
+ sentinelMaster, sentinelSlave1, sentinelSlave2 *redisProcess
+ sentinel1, sentinel2, sentinel3 *redisProcess
+)
+
+var cluster = &clusterScenario{
+ ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+ nodeIDs: make([]string, 6),
+ processes: make(map[string]*redisProcess, 6),
+ clients: make(map[string]*redis.Client, 6),
+}
+
+func registerProcess(port string, p *redisProcess) {
+ if processes == nil {
+ processes = make(map[string]*redisProcess)
+ }
+ processes[port] = p
+}
+
+var _ = BeforeSuite(func() {
+ var err error
+
+ redisMain, err = startRedis(redisPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ ringShard1, err = startRedis(ringShard1Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ ringShard2, err = startRedis(ringShard2Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ ringShard3, err = startRedis(ringShard3Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinelMaster, err = startRedis(sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinel1, err = startSentinel(sentinelPort1, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinel2, err = startSentinel(sentinelPort2, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinel3, err = startSentinel(sentinelPort3, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinelSlave1, err = startRedis(
+ sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinelSlave2, err = startRedis(
+ sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(startCluster(ctx, cluster)).NotTo(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+ Expect(cluster.Close()).NotTo(HaveOccurred())
+
+ for _, p := range processes {
+ Expect(p.Close()).NotTo(HaveOccurred())
+ }
+ processes = nil
+})
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "go-redis")
+}
+
+//------------------------------------------------------------------------------
+
+func redisOptions() *redis.Options {
+ return &redis.Options{
+ Addr: redisAddr,
+ DB: 15,
+
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+
+ MaxRetries: -1,
+
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ IdleTimeout: time.Minute,
+ IdleCheckFrequency: 100 * time.Millisecond,
+ }
+}
+
+func redisClusterOptions() *redis.ClusterOptions {
+ return &redis.ClusterOptions{
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+
+ MaxRedirects: 8,
+
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ IdleTimeout: time.Minute,
+ IdleCheckFrequency: 100 * time.Millisecond,
+ }
+}
+
+func redisRingOptions() *redis.RingOptions {
+ return &redis.RingOptions{
+ Addrs: map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ "ringShardTwo": ":" + ringShard2Port,
+ },
+
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+
+ MaxRetries: -1,
+
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ IdleTimeout: time.Minute,
+ IdleCheckFrequency: 100 * time.Millisecond,
+ }
+}
+
+func performAsync(n int, cbs ...func(int)) *sync.WaitGroup {
+ var wg sync.WaitGroup
+ for _, cb := range cbs {
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func(cb func(int), i int) {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ cb(i)
+ }(cb, i)
+ }
+ }
+ return &wg
+}
+
+func perform(n int, cbs ...func(int)) {
+ wg := performAsync(n, cbs...)
+ wg.Wait()
+}
+
+func eventually(fn func() error, timeout time.Duration) error {
+ errCh := make(chan error, 1)
+ done := make(chan struct{})
+ exit := make(chan struct{})
+
+ go func() {
+ for {
+ err := fn()
+ if err == nil {
+ close(done)
+ return
+ }
+
+ select {
+ case errCh <- err:
+ default:
+ }
+
+ select {
+ case <-exit:
+ return
+ case <-time.After(timeout / 100):
+ }
+ }
+ }()
+
+ select {
+ case <-done:
+ return nil
+ case <-time.After(timeout):
+ close(exit)
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return fmt.Errorf("timeout after %s without an error", timeout)
+ }
+ }
+}
+
+func execCmd(name string, args ...string) (*os.Process, error) {
+ cmd := exec.Command(name, args...)
+ if testing.Verbose() {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ return cmd.Process, cmd.Start()
+}
+
+func connectTo(port string) (*redis.Client, error) {
+ client := redis.NewClient(&redis.Options{
+ Addr: ":" + port,
+ MaxRetries: -1,
+ })
+
+ err := eventually(func() error {
+ return client.Ping(ctx).Err()
+ }, 30*time.Second)
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+type redisProcess struct {
+ *os.Process
+ *redis.Client
+}
+
+func (p *redisProcess) Close() error {
+ if err := p.Kill(); err != nil {
+ return err
+ }
+
+ err := eventually(func() error {
+ if err := p.Client.Ping(ctx).Err(); err != nil {
+ return nil
+ }
+ return errors.New("client is not shutdown")
+ }, 10*time.Second)
+ if err != nil {
+ return err
+ }
+
+ p.Client.Close()
+ return nil
+}
+
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+
+func redisDir(port string) (string, error) {
+ dir, err := filepath.Abs(filepath.Join("testdata", "instances", port))
+ if err != nil {
+ return "", err
+ }
+ if err := os.RemoveAll(dir); err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(dir, 0o775); err != nil {
+ return "", err
+ }
+ return dir, nil
+}
+
+func startRedis(port string, args ...string) (*redisProcess, error) {
+ dir, err := redisDir(port)
+ if err != nil {
+ return nil, err
+ }
+ if err = exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
+ return nil, err
+ }
+
+ baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir}
+ process, err := execCmd(redisServerBin, append(baseArgs, args...)...)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := connectTo(port)
+ if err != nil {
+ process.Kill()
+ return nil, err
+ }
+
+ p := &redisProcess{process, client}
+ registerProcess(port, p)
+ return p, err
+}
+
+func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
+ dir, err := redisDir(port)
+ if err != nil {
+ return nil, err
+ }
+
+ process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := connectTo(port)
+ if err != nil {
+ process.Kill()
+ return nil, err
+ }
+
+ // set down-after-milliseconds=2000
+ // link: https://github.com/redis/redis/issues/8607
+ for _, cmd := range []*redis.StatusCmd{
+ redis.NewStatusCmd(ctx, "SENTINEL", "MONITOR", masterName, "127.0.0.1", masterPort, "2"),
+ redis.NewStatusCmd(ctx, "SENTINEL", "SET", masterName, "down-after-milliseconds", "2000"),
+ redis.NewStatusCmd(ctx, "SENTINEL", "SET", masterName, "failover-timeout", "1000"),
+ redis.NewStatusCmd(ctx, "SENTINEL", "SET", masterName, "parallel-syncs", "1"),
+ } {
+ client.Process(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ process.Kill()
+ return nil, err
+ }
+ }
+
+ p := &redisProcess{process, client}
+ registerProcess(port, p)
+ return p, nil
+}
+
+//------------------------------------------------------------------------------
+
+type badConnError string
+
+func (e badConnError) Error() string { return string(e) }
+func (e badConnError) Timeout() bool { return true }
+func (e badConnError) Temporary() bool { return false }
+
+type badConn struct {
+ net.TCPConn
+
+ readDelay, writeDelay time.Duration
+ readErr, writeErr error
+}
+
+var _ net.Conn = &badConn{}
+
+func (cn *badConn) SetReadDeadline(t time.Time) error {
+ return nil
+}
+
+func (cn *badConn) SetWriteDeadline(t time.Time) error {
+ return nil
+}
+
+func (cn *badConn) Read([]byte) (int, error) {
+ if cn.readDelay != 0 {
+ time.Sleep(cn.readDelay)
+ }
+ if cn.readErr != nil {
+ return 0, cn.readErr
+ }
+ return 0, badConnError("bad connection")
+}
+
+func (cn *badConn) Write([]byte) (int, error) {
+ if cn.writeDelay != 0 {
+ time.Sleep(cn.writeDelay)
+ }
+ if cn.writeErr != nil {
+ return 0, cn.writeErr
+ }
+ return 0, badConnError("bad connection")
+}
+
+//------------------------------------------------------------------------------
+
+type hook struct {
+ beforeProcess func(ctx context.Context, cmd redis.Cmder) (context.Context, error)
+ afterProcess func(ctx context.Context, cmd redis.Cmder) error
+
+ beforeProcessPipeline func(ctx context.Context, cmds []redis.Cmder) (context.Context, error)
+ afterProcessPipeline func(ctx context.Context, cmds []redis.Cmder) error
+}
+
+func (h *hook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ if h.beforeProcess != nil {
+ return h.beforeProcess(ctx, cmd)
+ }
+ return ctx, nil
+}
+
+func (h *hook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
+ if h.afterProcess != nil {
+ return h.afterProcess(ctx, cmd)
+ }
+ return nil
+}
+
+func (h *hook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ if h.beforeProcessPipeline != nil {
+ return h.beforeProcessPipeline(ctx, cmds)
+ }
+ return ctx, nil
+}
+
+func (h *hook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
+ if h.afterProcessPipeline != nil {
+ return h.afterProcessPipeline(ctx, cmds)
+ }
+ return nil
+}
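The hook type at the end of main_test.go mirrors the public redis.Hook interface. A hedged sketch of registering such a hook on a client; the address, hook name, and log messages are illustrative.

package main

import (
    "context"
    "log"

    "github.com/go-redis/redis/v8"
)

type loggingHook struct{}

func (loggingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
    log.Printf("starting %s", cmd.Name())
    return ctx, nil
}

func (loggingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
    log.Printf("finished %s: %v", cmd.Name(), cmd.Err())
    return nil
}

func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
    return ctx, nil
}

func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
    return nil
}

func main() {
    rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    rdb.AddHook(loggingHook{})

    _ = rdb.Ping(context.Background()).Err()
}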
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go
new file mode 100644
index 0000000..a4abe32
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go
@@ -0,0 +1,429 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+ // Allow returns nil if the operation is allowed, or an error otherwise.
+ // If the operation is allowed, the client must call ReportResult with the
+ // outcome of the operation, whether it is a success or a failure.
+ Allow() error
+ // ReportResult reports the result of the previously allowed operation.
+ // A nil result indicates success; a non-nil error usually indicates failure.
+ ReportResult(result error)
+}
+
+// Options keeps the settings needed to set up a Redis connection.
+type Options struct {
+ // The network type, either tcp or unix.
+ // Default is tcp.
+ Network string
+ // host:port address.
+ Addr string
+
+ // Dialer creates a new network connection and has priority over
+ // the Network and Addr options.
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Hook that is called when a new connection is established.
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ // Use the specified Username to authenticate the current connection
+ // with one of the connections defined in the ACL list when connecting
+ // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ Username string
+ // Optional password. Must match the password specified in the
+ // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+ // or the User Password when connecting to a Redis 6.0 instance, or greater,
+ // that is using the Redis ACL system.
+ Password string
+
+ // Database to be selected after connecting to the server.
+ DB int
+
+ // Maximum number of retries before giving up.
+ // Default is 3 retries; -1 (not 0) disables retries.
+ MaxRetries int
+ // Minimum backoff between each retry.
+ // Default is 8 milliseconds; -1 disables backoff.
+ MinRetryBackoff time.Duration
+ // Maximum backoff between each retry.
+ // Default is 512 milliseconds; -1 disables backoff.
+ MaxRetryBackoff time.Duration
+
+ // Dial timeout for establishing new connections.
+ // Default is 5 seconds.
+ DialTimeout time.Duration
+ // Timeout for socket reads. If reached, commands will fail
+ // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+ // Default is 3 seconds.
+ ReadTimeout time.Duration
+ // Timeout for socket writes. If reached, commands will fail
+ // with a timeout instead of blocking.
+ // Default is ReadTimeout.
+ WriteTimeout time.Duration
+
+ // Type of connection pool.
+ // true for FIFO pool, false for LIFO pool.
+ // Note that fifo has higher overhead compared to lifo.
+ PoolFIFO bool
+ // Maximum number of socket connections.
+ // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
+ PoolSize int
+ // Minimum number of idle connections, which is useful when establishing a
+ // new connection is slow.
+ MinIdleConns int
+ // Connection age at which client retires (closes) the connection.
+ // Default is to not close aged connections.
+ MaxConnAge time.Duration
+ // Amount of time client waits for connection if all connections
+ // are busy before returning an error.
+ // Default is ReadTimeout + 1 second.
+ PoolTimeout time.Duration
+ // Amount of time after which client closes idle connections.
+ // Should be less than server's timeout.
+ // Default is 5 minutes. -1 disables idle timeout check.
+ IdleTimeout time.Duration
+ // Frequency of idle checks made by idle connections reaper.
+ // Default is 1 minute. -1 disables idle connections reaper,
+ // but idle connections are still discarded by the client
+ // if IdleTimeout is set.
+ IdleCheckFrequency time.Duration
+
+ // Enables read only queries on slave nodes.
+ readOnly bool
+
+ // TLS Config to use. When set TLS will be negotiated.
+ TLSConfig *tls.Config
+
+ // Limiter interface used to implement a circuit breaker or rate limiter.
+ Limiter Limiter
+}
+
+func (opt *Options) init() {
+ if opt.Addr == "" {
+ opt.Addr = "localhost:6379"
+ }
+ if opt.Network == "" {
+ if strings.HasPrefix(opt.Addr, "/") {
+ opt.Network = "unix"
+ } else {
+ opt.Network = "tcp"
+ }
+ }
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
+ if opt.Dialer == nil {
+ opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+ }
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
+ }
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+ if opt.PoolTimeout == 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ }
+ if opt.IdleTimeout == 0 {
+ opt.IdleTimeout = 5 * time.Minute
+ }
+ if opt.IdleCheckFrequency == 0 {
+ opt.IdleCheckFrequency = time.Minute
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *Options) clone() *Options {
+ clone := *opt
+ return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by tcp socket and by unix socket.
+// Tcp connection:
+// redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+// Examples:
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
+func ParseURL(redisURL string) (*Options, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+}
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
+
+ h, p, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ h = u.Host
+ }
+ if h == "" {
+ h = "localhost"
+ }
+ if p == "" {
+ p = "6379"
+ }
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{ServerName: h}
+ }
+
+ return setupConnParams(u, o)
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+ o.Username, o.Password = getUserPassword(u)
+ return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+ q url.Values
+ err error
+}
+
+func (o *queryOptions) string(name string) string {
+ vs := o.q[name]
+ if len(vs) == 0 {
+ return ""
+ }
+ delete(o.q, name) // enable detection of unknown parameters
+ return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ i, err := strconv.Atoi(s)
+ if err == nil {
+ return i
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ // try plain number first
+ if i, err := strconv.Atoi(s); err == nil {
+ if i <= 0 {
+ // disable timeouts
+ return -1
+ }
+ return time.Duration(i) * time.Second
+ }
+ dur, err := time.ParseDuration(s)
+ if err == nil {
+ return dur
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+ switch s := o.string(name); s {
+ case "true", "1":
+ return true
+ case "false", "0", "":
+ return false
+ default:
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+ }
+ return false
+ }
+}
+
+func (o *queryOptions) remaining() []string {
+ if len(o.q) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(o.q))
+ for k := range o.q {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+ q := queryOptions{q: u.Query()}
+
+ // compat: a future major release may use q.int("db")
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+ }
+
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxConnAge = q.duration("max_conn_age")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.IdleTimeout = q.duration("idle_timeout")
+ o.IdleCheckFrequency = q.duration("idle_check_frequency")
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return opt.Dialer(ctx, opt.Network, opt.Addr)
+ },
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ })
+}
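A minimal sketch of combining ParseURL with NewClient; the URL mirrors the example from the ParseURL comment and is illustrative.

package main

import (
    "log"

    "github.com/go-redis/redis/v8"
)

func main() {
    // Database 3, 3s dial timeout (plain integer means seconds), 6s read timeout.
    opt, err := redis.ParseURL("redis://user:secret@localhost:6789/3?dial_timeout=3&read_timeout=6s")
    if err != nil {
        log.Fatal(err)
    }

    rdb := redis.NewClient(opt)
    defer rdb.Close()
}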
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go
new file mode 100644
index 0000000..1450523
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go
@@ -0,0 +1,216 @@
+//go:build go1.7
+// +build go1.7
+
+package redis
+
+import (
+ "crypto/tls"
+ "errors"
+ "testing"
+ "time"
+)
+
+func TestParseURL(t *testing.T) {
+ cases := []struct {
+ url string
+ o *Options // expected value
+ err error
+ }{
+ {
+ url: "redis://localhost:123/1",
+ o: &Options{Addr: "localhost:123", DB: 1},
+ }, {
+ url: "redis://localhost:123",
+ o: &Options{Addr: "localhost:123"},
+ }, {
+ url: "redis://localhost/1",
+ o: &Options{Addr: "localhost:6379", DB: 1},
+ }, {
+ url: "redis://12345",
+ o: &Options{Addr: "12345:6379"},
+ }, {
+ url: "rediss://localhost:123",
+ o: &Options{Addr: "localhost:123", TLSConfig: &tls.Config{ /* no deep comparison */ }},
+ }, {
+ url: "redis://:bar@localhost:123",
+ o: &Options{Addr: "localhost:123", Password: "bar"},
+ }, {
+ url: "redis://foo@localhost:123",
+ o: &Options{Addr: "localhost:123", Username: "foo"},
+ }, {
+ url: "redis://foo:bar@localhost:123",
+ o: &Options{Addr: "localhost:123", Username: "foo", Password: "bar"},
+ }, {
+ // multiple params
+ url: "redis://localhost:123/?db=2&read_timeout=2&pool_fifo=true",
+ o: &Options{Addr: "localhost:123", DB: 2, ReadTimeout: 2 * time.Second, PoolFIFO: true},
+ }, {
+ // special case handling for disabled timeouts
+ url: "redis://localhost:123/?db=2&idle_timeout=0",
+ o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: -1},
+ }, {
+ // negative values disable timeouts as well
+ url: "redis://localhost:123/?db=2&idle_timeout=-1",
+ o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: -1},
+ }, {
+ // absent timeout values will use defaults
+ url: "redis://localhost:123/?db=2&idle_timeout=",
+ o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: 0},
+ }, {
+ url: "redis://localhost:123/?db=2&idle_timeout", // missing "=" at the end
+ o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: 0},
+ }, {
+ url: "unix:///tmp/redis.sock",
+ o: &Options{Addr: "/tmp/redis.sock"},
+ }, {
+ url: "unix://foo:bar@/tmp/redis.sock",
+ o: &Options{Addr: "/tmp/redis.sock", Username: "foo", Password: "bar"},
+ }, {
+ url: "unix://foo:bar@/tmp/redis.sock?db=3",
+ o: &Options{Addr: "/tmp/redis.sock", Username: "foo", Password: "bar", DB: 3},
+ }, {
+ // invalid db format
+ url: "unix://foo:bar@/tmp/redis.sock?db=test",
+ err: errors.New(`redis: invalid database number: strconv.Atoi: parsing "test": invalid syntax`),
+ }, {
+ // invalid int value
+ url: "redis://localhost/?pool_size=five",
+ err: errors.New(`redis: invalid pool_size number: strconv.Atoi: parsing "five": invalid syntax`),
+ }, {
+ // invalid bool value
+ url: "redis://localhost/?pool_fifo=yes",
+ err: errors.New(`redis: invalid pool_fifo boolean: expected true/false/1/0 or an empty string, got "yes"`),
+ }, {
+ // it returns first error
+ url: "redis://localhost/?db=foo&pool_size=five",
+ err: errors.New(`redis: invalid database number: strconv.Atoi: parsing "foo": invalid syntax`),
+ }, {
+ url: "redis://localhost/?abc=123",
+ err: errors.New("redis: unexpected option: abc"),
+ }, {
+ url: "redis://foo@localhost/?username=bar",
+ err: errors.New("redis: unexpected option: username"),
+ }, {
+ url: "redis://localhost/?wrte_timout=10s&abc=123",
+ err: errors.New("redis: unexpected option: abc, wrte_timout"),
+ }, {
+ url: "http://google.com",
+ err: errors.New("redis: invalid URL scheme: http"),
+ }, {
+ url: "redis://localhost/1/2/3/4",
+ err: errors.New("redis: invalid URL path: /1/2/3/4"),
+ }, {
+ url: "12345",
+ err: errors.New("redis: invalid URL scheme: "),
+ }, {
+ url: "redis://localhost/iamadatabase",
+ err: errors.New(`redis: invalid database number: "iamadatabase"`),
+ },
+ }
+
+ for i := range cases {
+ tc := cases[i]
+ t.Run(tc.url, func(t *testing.T) {
+ t.Parallel()
+
+ actual, err := ParseURL(tc.url)
+ if tc.err == nil && err != nil {
+ t.Fatalf("unexpected error: %q", err)
+ return
+ }
+ if tc.err != nil && err != nil {
+ if tc.err.Error() != err.Error() {
+ t.Fatalf("got %q, expected %q", err, tc.err)
+ }
+ return
+ }
+ compareOptions(t, actual, tc.o)
+ })
+ }
+}
+
+func compareOptions(t *testing.T, actual, expected *Options) {
+ t.Helper()
+
+ if actual.Addr != expected.Addr {
+ t.Errorf("got %q, want %q", actual.Addr, expected.Addr)
+ }
+ if actual.DB != expected.DB {
+ t.Errorf("DB: got %q, expected %q", actual.DB, expected.DB)
+ }
+ if actual.TLSConfig == nil && expected.TLSConfig != nil {
+ t.Errorf("got nil TLSConfig, expected a TLSConfig")
+ }
+ if actual.TLSConfig != nil && expected.TLSConfig == nil {
+ t.Errorf("got TLSConfig, expected no TLSConfig")
+ }
+ if actual.Username != expected.Username {
+ t.Errorf("Username: got %q, expected %q", actual.Username, expected.Username)
+ }
+ if actual.Password != expected.Password {
+ t.Errorf("Password: got %q, expected %q", actual.Password, expected.Password)
+ }
+ if actual.MaxRetries != expected.MaxRetries {
+ t.Errorf("MaxRetries: got %v, expected %v", actual.MaxRetries, expected.MaxRetries)
+ }
+ if actual.MinRetryBackoff != expected.MinRetryBackoff {
+ t.Errorf("MinRetryBackoff: got %v, expected %v", actual.MinRetryBackoff, expected.MinRetryBackoff)
+ }
+ if actual.MaxRetryBackoff != expected.MaxRetryBackoff {
+ t.Errorf("MaxRetryBackoff: got %v, expected %v", actual.MaxRetryBackoff, expected.MaxRetryBackoff)
+ }
+ if actual.DialTimeout != expected.DialTimeout {
+ t.Errorf("DialTimeout: got %v, expected %v", actual.DialTimeout, expected.DialTimeout)
+ }
+ if actual.ReadTimeout != expected.ReadTimeout {
+ t.Errorf("ReadTimeout: got %v, expected %v", actual.ReadTimeout, expected.ReadTimeout)
+ }
+ if actual.WriteTimeout != expected.WriteTimeout {
+ t.Errorf("WriteTimeout: got %v, expected %v", actual.WriteTimeout, expected.WriteTimeout)
+ }
+ if actual.PoolFIFO != expected.PoolFIFO {
+ t.Errorf("PoolFIFO: got %v, expected %v", actual.PoolFIFO, expected.PoolFIFO)
+ }
+ if actual.PoolSize != expected.PoolSize {
+ t.Errorf("PoolSize: got %v, expected %v", actual.PoolSize, expected.PoolSize)
+ }
+ if actual.MinIdleConns != expected.MinIdleConns {
+ t.Errorf("MinIdleConns: got %v, expected %v", actual.MinIdleConns, expected.MinIdleConns)
+ }
+ if actual.MaxConnAge != expected.MaxConnAge {
+ t.Errorf("MaxConnAge: got %v, expected %v", actual.MaxConnAge, expected.MaxConnAge)
+ }
+ if actual.PoolTimeout != expected.PoolTimeout {
+ t.Errorf("PoolTimeout: got %v, expected %v", actual.PoolTimeout, expected.PoolTimeout)
+ }
+ if actual.IdleTimeout != expected.IdleTimeout {
+ t.Errorf("IdleTimeout: got %v, expected %v", actual.IdleTimeout, expected.IdleTimeout)
+ }
+ if actual.IdleCheckFrequency != expected.IdleCheckFrequency {
+ t.Errorf("IdleCheckFrequency: got %v, expected %v", actual.IdleCheckFrequency, expected.IdleCheckFrequency)
+ }
+}
+
+// Test ReadTimeout option initialization, including the special values -1 and 0.
+// Also test that WriteTimeout, when not explicitly set, falls back to the
+// ReadTimeout value.
+func TestReadTimeoutOptions(t *testing.T) {
+ testDataInputOutputMap := map[time.Duration]time.Duration{
+ -1: 0 * time.Second,
+ 0: 3 * time.Second,
+ 1: 1 * time.Nanosecond,
+ 3: 3 * time.Nanosecond,
+ }
+
+ for in, out := range testDataInputOutputMap {
+ o := &Options{ReadTimeout: in}
+ o.init()
+ if o.ReadTimeout != out {
+ t.Errorf("got %d instead of %d as ReadTimeout option", o.ReadTimeout, out)
+ }
+
+ if o.WriteTimeout != o.ReadTimeout {
+ t.Errorf("got %d instead of %d as WriteTimeout option", o.WriteTimeout, o.ReadTimeout)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json
new file mode 100644
index 0000000..e4ea4bb
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json
@@ -0,0 +1,8 @@
+{
+ "name": "redis",
+ "version": "8.11.5",
+ "main": "index.js",
+ "repository": "git@github.com:go-redis/redis.git",
+ "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+ "license": "BSD-2-clause"
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go
new file mode 100644
index 0000000..31bab97
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go
@@ -0,0 +1,147 @@
+package redis
+
+import (
+ "context"
+ "sync"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism for using the Redis pipelining technique.
+//
+// Pipelining can dramatically speed up processing by packing operations into
+// batches, sending them to Redis at once, and reading all the replies in a
+// single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that a Pipeline is not a transaction, so you can get unexpected
+// results with large pipelines and small read/write timeouts.
+// The Redis client retries commands on timeouts, so a pipeline can be
+// retransmitted and its commands can be executed more than once.
+// To avoid this, use read/write timeouts that are reasonably large for your
+// batch size, and/or use TxPipeline.
+type Pipeliner interface {
+ StatefulCmdable
+ Len() int
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Close() error
+ Discard() error
+ Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ ctx context.Context
+ exec pipelineExecer
+
+ mu sync.Mutex
+ cmds []Cmder
+ closed bool
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+ c.mu.Lock()
+ ln := len(c.cmds)
+ c.mu.Unlock()
+ return ln
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+ c.mu.Lock()
+ c.cmds = append(c.cmds, cmd)
+ c.mu.Unlock()
+ return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+ c.mu.Lock()
+ _ = c.discard()
+ c.closed = true
+ c.mu.Unlock()
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+ c.mu.Lock()
+ err := c.discard()
+ c.mu.Unlock()
+ return err
+}
+
+func (c *Pipeline) discard() error {
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ cmds, err := c.Exec(ctx)
+ _ = c.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go
new file mode 100644
index 0000000..f24114d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go
@@ -0,0 +1,104 @@
+package redis_test
+
+import (
+ "strconv"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("pipelining", func() {
+ var client *redis.Client
+ var pipe *redis.Pipeline
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("supports block style", func() {
+ var get *redis.StringCmd
+ cmds, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ get = pipe.Get(ctx, "foo")
+ return nil
+ })
+ Expect(err).To(Equal(redis.Nil))
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0]).To(Equal(get))
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+ })
+
+ assertPipeline := func() {
+ It("returns no errors when there are no commands", func() {
+ _, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("discards queued commands", func() {
+ pipe.Get(ctx, "key")
+ pipe.Discard()
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(BeNil())
+ })
+
+ It("handles val/err", func() {
+ err := client.Set(ctx, "key", "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ get := pipe.Get(ctx, "key")
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+
+ val, err := get.Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("value"))
+ })
+
+ It("supports custom command", func() {
+ pipe.Do(ctx, "ping")
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ })
+
+ It("handles large pipelines", func() {
+ for callCount := 1; callCount < 16; callCount++ {
+ for i := 1; i <= callCount; i++ {
+ pipe.SetNX(ctx, strconv.Itoa(i)+"_key", strconv.Itoa(i)+"_value", 0)
+ }
+
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(callCount))
+ for _, cmd := range cmds {
+ Expect(cmd).To(BeAssignableToTypeOf(&redis.BoolCmd{}))
+ }
+ }
+ })
+ }
+
+ Describe("Pipeline", func() {
+ BeforeEach(func() {
+ pipe = client.Pipeline().(*redis.Pipeline)
+ })
+
+ assertPipeline()
+ })
+
+ Describe("TxPipeline", func() {
+ BeforeEach(func() {
+ pipe = client.TxPipeline().(*redis.Pipeline)
+ })
+
+ assertPipeline()
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go
new file mode 100644
index 0000000..dbef72e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go
@@ -0,0 +1,157 @@
+package redis_test
+
+import (
+ "context"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("pool", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ opt := redisOptions()
+ opt.MinIdleConns = 0
+ opt.MaxConnAge = 0
+ opt.IdleTimeout = time.Second
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("respects max size", func() {
+ perform(1000, func(id int) {
+ val, err := client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ })
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.IdleLen()))
+ })
+
+ It("respects max size on multi", func() {
+ perform(1000, func(id int) {
+ var ping *redis.StatusCmd
+
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ cmds, err := tx.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ ping = pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ })
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.IdleLen()))
+ })
+
+ It("respects max size on pipelines", func() {
+ perform(1000, func(id int) {
+ pipe := client.Pipeline()
+ ping := pipe.Ping(ctx)
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+ })
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.IdleLen()))
+ })
+
+ It("removes broken connections", func() {
+ cn, err := client.Pool().Get(context.Background())
+ Expect(err).NotTo(HaveOccurred())
+ cn.SetNetConn(&badConn{})
+ client.Pool().Put(ctx, cn)
+
+ err = client.Ping(ctx).Err()
+ Expect(err).To(MatchError("bad connection"))
+
+ val, err := client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(Equal(1))
+ Expect(pool.IdleLen()).To(Equal(1))
+
+ stats := pool.Stats()
+ Expect(stats.Hits).To(Equal(uint32(1)))
+ Expect(stats.Misses).To(Equal(uint32(2)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("reuses connections", func() {
+ // explain: https://github.com/go-redis/redis/pull/1675
+ opt := redisOptions()
+ opt.MinIdleConns = 0
+ opt.MaxConnAge = 0
+ opt.IdleTimeout = 2 * time.Second
+ client = redis.NewClient(opt)
+
+ for i := 0; i < 100; i++ {
+ val, err := client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ }
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(Equal(1))
+ Expect(pool.IdleLen()).To(Equal(1))
+
+ stats := pool.Stats()
+ Expect(stats.Hits).To(Equal(uint32(99)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("removes idle connections", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats).To(Equal(&redis.PoolStats{
+ Hits: 0,
+ Misses: 1,
+ Timeouts: 0,
+ TotalConns: 1,
+ IdleConns: 1,
+ StaleConns: 0,
+ }))
+
+ time.Sleep(2 * time.Second)
+
+ stats = client.PoolStats()
+ Expect(stats).To(Equal(&redis.PoolStats{
+ Hits: 0,
+ Misses: 1,
+ Timeouts: 0,
+ TotalConns: 0,
+ IdleConns: 0,
+ StaleConns: 1,
+ }))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go
new file mode 100644
index 0000000..efc2354
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go
@@ -0,0 +1,668 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to the Redis server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+ opt *Options
+
+ newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
+ closeConn func(*pool.Conn) error
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+
+ closed bool
+ exit chan struct{}
+
+ cmd *Cmd
+
+ chOnce sync.Once
+ msgCh *channel
+ allCh *channel
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
+}
+
+func (c *PubSub) String() string {
+ channels := mapKeys(c.channels)
+ channels = append(channels, mapKeys(c.patterns)...)
+ return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c.conn(ctx, nil)
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
+ cn, err := c.newConn(ctx, channels)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.resubscribe(ctx, cn); err != nil {
+ _ = c.closeConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+ var firstErr error
+
+ if len(c.channels) > 0 {
+ firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+ }
+
+ if len(c.patterns) > 0 {
+ err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
+ }
+ return s
+}
+
+func (c *PubSub) _subscribe(
+ ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+ ctx context.Context,
+ cn *pool.Conn,
+ err error,
+ allowTimeout bool,
+) {
+ c.mu.Lock()
+ c.releaseConn(ctx, cn, err, allowTimeout)
+ c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+ if c.cn != cn {
+ return
+ }
+ if isBadConn(err, allowTimeout, c.opt.Addr) {
+ c.reconnect(ctx, err)
+ }
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+ _ = c.closeTheCn(reason)
+ _, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+ }
+ err := c.closeConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+ close(c.exit)
+
+ return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe subscribes the client to the specified channels. It returns
+// an empty subscription if there are no channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "subscribe", channels...)
+ if c.channels == nil {
+ c.channels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
+ }
+ return err
+}
+
+// PSubscribe subscribes the client to the given patterns. It returns
+// an empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "psubscribe", patterns...)
+ if c.patterns == nil {
+ c.patterns = make(map[string]struct{})
+ }
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
+ }
+ return err
+}
+
+// Unsubscribe unsubscribes the client from the given channels, or from all
+// of them if none is given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ err := c.subscribe(ctx, "unsubscribe", channels...)
+ return err
+}
+
+// PUnsubscribe unsubscribes the client from the given patterns, or from all
+// of them if none is given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ err := c.subscribe(ctx, "punsubscribe", patterns...)
+ return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+ cn, err := c.conn(ctx, channels)
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(ctx, cn, redisCmd, channels)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(ctx, args...)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ cn, err := c.conn(ctx, nil)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeCmd(ctx, cn, cmd)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+// Subscription is received after a successful subscription to a channel.
+type Subscription struct {
+ // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as a result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+ PayloadSlice []string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as a result of a PING command issued by another client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ // Can be nil in case of "unsubscribe".
+ channel, _ := reply[1].(string)
+ return &Subscription{
+ Kind: kind,
+ Channel: channel,
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ switch payload := reply[2].(type) {
+ case string:
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: payload,
+ }, nil
+ case []interface{}:
+ ss := make([]string, len(payload))
+ for i, s := range payload {
+ ss[i] = s.(string)
+ }
+ return &Message{
+ Channel: reply[1].(string),
+ PayloadSlice: ss,
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+ }
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API; in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd(ctx)
+ }
+
+ // Don't hold the lock to allow subscriptions and pings.
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong, or error.
+// See the PubSub example for details. This is a low-level API; in most
+// cases Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+ return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or an error, ignoring Subscription and
+// Pong messages. This is a low-level API; in most cases Channel should be
+// used instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
+ }
+ }
+}
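+
+// Illustrative sketch (not part of the original source): a low-level receive
+// loop using Receive and a type switch; pubsub and ctx are assumed to exist.
+//
+//    for {
+//        msgi, err := pubsub.Receive(ctx)
+//        if err != nil {
+//            break
+//        }
+//        switch msg := msgi.(type) {
+//        case *Subscription:
+//            // subscribe/unsubscribe confirmation
+//        case *Pong:
+//            // reply to Ping
+//        case *Message:
+//            fmt.Println(msg.Channel, msg.Payload)
+//        }
+//    }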
+
+func (c *PubSub) getContext() context.Context {
+ if c.cmd != nil {
+ return c.cmd.ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// stays full for the configured send timeout (one minute by default),
+// the message is dropped.
+// Receive* APIs can not be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and reconnects (re-subscribing to the channels) if a ping fails.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.msgCh = newChannel(c, opts...)
+ c.msgCh.initMsgChan()
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ return c.msgCh.msgCh
+}
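+
+// Illustrative sketch (not part of the original source): consuming messages
+// through the Go channel returned by Channel; rdb and ctx are assumed.
+//
+//    pubsub := rdb.Subscribe(ctx, "mychannel")
+//    defer pubsub.Close()
+//
+//    for msg := range pubsub.Channel() {
+//        fmt.Println(msg.Channel, msg.Payload)
+//    }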
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but the message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.allCh = newChannel(c, WithChannelSize(size))
+ c.allCh.initAllChan()
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+ return func(c *channel) {
+ c.chanSize = size
+ }
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping the Redis server if it does not receive any messages
+// within the interval. To disable the health check, use a zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.checkInterval = d
+ }
+}
+
+// WithChannelSendTimeout specifies how long a send to the Go channel may
+// block before the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.chanSendTimeout = d
+ }
+}
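+
+// Illustrative sketch (not part of the original source): combining the
+// channel options above when creating the Go channel; pubsub is assumed and
+// the values shown are arbitrary.
+//
+//    ch := pubsub.Channel(
+//        WithChannelSize(500),
+//        WithChannelHealthCheckInterval(10*time.Second),
+//        WithChannelSendTimeout(30*time.Second),
+//    )
+//    _ = ch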
+
+type channel struct {
+ pubSub *PubSub
+
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+
+ chanSize int
+ chanSendTimeout time.Duration
+ checkInterval time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+ c := &channel{
+ pubSub: pubSub,
+
+ chanSize: 100,
+ chanSendTimeout: time.Minute,
+ checkInterval: 3 * time.Second,
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.checkInterval > 0 {
+ c.initHealthCheck()
+ }
+ return c
+}
+
+func (c *channel) initHealthCheck() {
+ ctx := context.TODO()
+ c.ping = make(chan struct{}, 1)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ for {
+ timer.Reset(c.checkInterval)
+ select {
+ case <-c.ping:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+ c.pubSub.mu.Lock()
+ c.pubSub.reconnect(ctx, pingErr)
+ c.pubSub.mu.Unlock()
+ }
+ case <-c.pubSub.exit:
+ return
+ }
+ }
+ }()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *channel) initMsgChan() {
+ ctx := context.TODO()
+ c.msgCh = make(chan *Message, c.chanSize)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.pubSub.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.msgCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.msgCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
+ default:
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *channel) initAllChan() {
+ ctx := context.TODO()
+ c.allCh = make(chan interface{}, c.chanSize)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.pubSub.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.allCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Pong:
+ // Ignore.
+ case *Subscription, *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
+ default:
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go
new file mode 100644
index 0000000..2dfa66b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go
@@ -0,0 +1,495 @@
+package redis_test
+
+import (
+ "context"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("PubSub", func() {
+ var client *redis.Client
+ var clientID int64
+
+ BeforeEach(func() {
+ opt := redisOptions()
+ opt.MinIdleConns = 0
+ opt.MaxConnAge = 0
+ opt.OnConnect = func(ctx context.Context, cn *redis.Conn) (err error) {
+ clientID, err = cn.ClientID(ctx).Result()
+ return err
+ }
+ client = redis.NewClient(opt)
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("implements Stringer", func() {
+ pubsub := client.PSubscribe(ctx, "mychannel*")
+ defer pubsub.Close()
+
+ Expect(pubsub.String()).To(Equal("PubSub(mychannel*)"))
+ })
+
+ It("should support pattern matching", func() {
+ pubsub := client.PSubscribe(ctx, "mychannel*")
+ defer pubsub.Close()
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("psubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel*"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err.(net.Error).Timeout()).To(Equal(true))
+ Expect(msgi).To(BeNil())
+ }
+
+ n, err := client.Publish(ctx, "mychannel1", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ Expect(pubsub.PUnsubscribe(ctx, "mychannel*")).NotTo(HaveOccurred())
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Message)
+ Expect(subscr.Channel).To(Equal("mychannel1"))
+ Expect(subscr.Pattern).To(Equal("mychannel*"))
+ Expect(subscr.Payload).To(Equal("hello"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("punsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel*"))
+ Expect(subscr.Count).To(Equal(0))
+ }
+
+ stats := client.PoolStats()
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ })
+
+ It("should pub/sub channels", func() {
+ channels, err := client.PubSubChannels(ctx, "mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ pubsub := client.Subscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ channels, err = client.PubSubChannels(ctx, "mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(ConsistOf([]string{"mychannel", "mychannel2"}))
+
+ channels, err = client.PubSubChannels(ctx, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ channels, err = client.PubSubChannels(ctx, "*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(channels)).To(BeNumerically(">=", 2))
+ })
+
+ It("should return the numbers of subscribers", func() {
+ pubsub := client.Subscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ channels, err := client.PubSubNumSub(ctx, "mychannel", "mychannel2", "mychannel3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(Equal(map[string]int64{
+ "mychannel": 1,
+ "mychannel2": 1,
+ "mychannel3": 0,
+ }))
+ })
+
+ It("should return the numbers of subscribers by pattern", func() {
+ num, err := client.PubSubNumPat(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(0)))
+
+ pubsub := client.PSubscribe(ctx, "*")
+ defer pubsub.Close()
+
+ num, err = client.PubSubNumPat(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(1)))
+ })
+
+ It("should pub/sub", func() {
+ pubsub := client.Subscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("subscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("subscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(2))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err.(net.Error).Timeout()).To(Equal(true))
+ Expect(msgi).To(BeNil())
+ }
+
+ n, err := client.Publish(ctx, "mychannel", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Publish(ctx, "mychannel2", "hello2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ Expect(pubsub.Unsubscribe(ctx, "mychannel", "mychannel2")).NotTo(HaveOccurred())
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel2"))
+ Expect(msg.Payload).To(Equal("hello2"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("unsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("unsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(0))
+ }
+
+ stats := client.PoolStats()
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ })
+
+ It("should ping/pong", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ _, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pubsub.Ping(ctx, "")
+ Expect(err).NotTo(HaveOccurred())
+
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ pong := msgi.(*redis.Pong)
+ Expect(pong.Payload).To(Equal(""))
+ })
+
+ It("should ping/pong with payload", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ _, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pubsub.Ping(ctx, "hello")
+ Expect(err).NotTo(HaveOccurred())
+
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ pong := msgi.(*redis.Pong)
+ Expect(pong.Payload).To(Equal("hello"))
+ })
+
+ It("should multi-ReceiveMessage", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "subscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ err = client.Publish(ctx, "mychannel", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.Publish(ctx, "mychannel", "world").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ msg, err := pubsub.ReceiveMessage(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ msg, err = pubsub.ReceiveMessage(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("world"))
+ })
+
+ It("returns an error when subscribe fails", func() {
+ pubsub := client.Subscribe(ctx)
+ defer pubsub.Close()
+
+ pubsub.SetNetConn(&badConn{
+ readErr: io.EOF,
+ writeErr: io.EOF,
+ })
+
+ err := pubsub.Subscribe(ctx, "mychannel")
+ Expect(err).To(MatchError("EOF"))
+
+ err = pubsub.Subscribe(ctx, "mychannel")
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ expectReceiveMessageOnError := func(pubsub *redis.PubSub) {
+ pubsub.SetNetConn(&badConn{
+ readErr: io.EOF,
+ writeErr: io.EOF,
+ })
+
+ step := make(chan struct{}, 3)
+
+ go func() {
+ defer GinkgoRecover()
+
+ Eventually(step).Should(Receive())
+ err := client.Publish(ctx, "mychannel", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+ step <- struct{}{}
+ }()
+
+ _, err := pubsub.ReceiveMessage(ctx)
+ Expect(err).To(Equal(io.EOF))
+ step <- struct{}{}
+
+ msg, err := pubsub.ReceiveMessage(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ Eventually(step).Should(Receive())
+ }
+
+ It("Subscribe should reconnect on ReceiveMessage error", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "subscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ expectReceiveMessageOnError(pubsub)
+ })
+
+ It("PSubscribe should reconnect on ReceiveMessage error", func() {
+ pubsub := client.PSubscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "psubscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ expectReceiveMessageOnError(pubsub)
+ })
+
+ It("should return on Close", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+
+ wg.Done()
+ defer wg.Done()
+
+ _, err := pubsub.ReceiveMessage(ctx)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(SatisfyAny(
+ Equal("redis: client is closed"),
+ ContainSubstring("use of closed network connection"),
+ ))
+ }()
+
+ wg.Wait()
+ wg.Add(1)
+
+ Expect(pubsub.Close()).NotTo(HaveOccurred())
+
+ wg.Wait()
+ })
+
+ It("should ReceiveMessage without a subscription", func() {
+ timeout := 100 * time.Millisecond
+
+ pubsub := client.Subscribe(ctx)
+ defer pubsub.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ time.Sleep(timeout)
+
+ err := pubsub.Subscribe(ctx, "mychannel")
+ Expect(err).NotTo(HaveOccurred())
+
+ time.Sleep(timeout)
+
+ err = client.Publish(ctx, "mychannel", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }()
+
+ msg, err := pubsub.ReceiveMessage(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ wg.Wait()
+ })
+
+ It("handles big message payload", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ ch := pubsub.Channel()
+
+ bigVal := bigVal()
+ err := client.Publish(ctx, "mychannel", bigVal).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var msg *redis.Message
+ Eventually(ch).Should(Receive(&msg))
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal(string(bigVal)))
+ })
+
+ It("handles message payload slice with server-assisted client-size caching", func() {
+ pubsub := client.Subscribe(ctx, "__redis__:invalidate")
+ defer pubsub.Close()
+
+ client2 := redis.NewClient(redisOptions())
+ defer client2.Close()
+
+ err := client2.Do(ctx, "CLIENT", "TRACKING", "on", "REDIRECT", clientID).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client2.Do(ctx, "GET", "mykey").Err()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client2.Do(ctx, "SET", "mykey", "myvalue").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ ch := pubsub.Channel()
+
+ var msg *redis.Message
+ Eventually(ch).Should(Receive(&msg))
+ Expect(msg.Channel).To(Equal("__redis__:invalidate"))
+ Expect(msg.PayloadSlice).To(Equal([]string{"mykey"}))
+ })
+
+ It("supports concurrent Ping and Receive", func() {
+ const N = 100
+
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ done := make(chan struct{})
+ go func() {
+ defer GinkgoRecover()
+
+ for i := 0; i < N; i++ {
+ _, err := pubsub.ReceiveTimeout(ctx, 5*time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ close(done)
+ }()
+
+ for i := 0; i < N; i++ {
+ err := pubsub.Ping(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ select {
+ case <-done:
+ case <-time.After(30 * time.Second):
+ Fail("timeout")
+ }
+ })
+
+ It("should ChannelMessage", func() {
+ pubsub := client.Subscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ ch := pubsub.Channel(
+ redis.WithChannelSize(10),
+ redis.WithChannelHealthCheckInterval(time.Second),
+ )
+
+ text := "test channel message"
+ err := client.Publish(ctx, "mychannel", text).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var msg *redis.Message
+ Eventually(ch).Should(Receive(&msg))
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal(text))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go
new file mode 100644
index 0000000..34699d1
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go
@@ -0,0 +1,392 @@
+package redis_test
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("races", func() {
+ var client *redis.Client
+ var C, N int
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).To(BeNil())
+
+ C, N = 10, 1000
+ if testing.Short() {
+ C = 4
+ N = 100
+ }
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should echo", func() {
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ msg := fmt.Sprintf("echo %d %d", id, i)
+ echo, err := client.Echo(ctx, msg).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(echo).To(Equal(msg))
+ }
+ })
+ })
+
+ It("should incr", func() {
+ key := "TestIncrFromGoroutines"
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Incr(ctx, key).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ val, err := client.Get(ctx, key).Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(C * N)))
+ })
+
+ It("should handle many keys", func() {
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Set(
+ ctx,
+ fmt.Sprintf("keys.key-%d-%d", id, i),
+ fmt.Sprintf("hello-%d-%d", id, i),
+ 0,
+ ).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ keys := client.Keys(ctx, "keys.*")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(len(keys.Val())).To(Equal(C * N))
+ })
+
+ It("should handle many keys 2", func() {
+ perform(C, func(id int) {
+ keys := []string{"non-existent-key"}
+ for i := 0; i < N; i++ {
+ key := fmt.Sprintf("keys.key-%d", i)
+ keys = append(keys, key)
+
+ err := client.Set(ctx, key, fmt.Sprintf("hello-%d", i), 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ keys = append(keys, "non-existent-key")
+
+ vals, err := client.MGet(ctx, keys...).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(vals)).To(Equal(N + 2))
+
+ for i := 0; i < N; i++ {
+ Expect(vals[i+1]).To(Equal(fmt.Sprintf("hello-%d", i)))
+ }
+
+ Expect(vals[0]).To(BeNil())
+ Expect(vals[N+1]).To(BeNil())
+ })
+ })
+
+ It("should handle big vals in Get", func() {
+ C, N = 4, 100
+
+ bigVal := bigVal()
+
+ err := client.Set(ctx, "key", bigVal, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Reconnect to get a new connection.
+ Expect(client.Close()).To(BeNil())
+ client = redis.NewClient(redisOptions())
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ got, err := client.Get(ctx, "key").Bytes()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(got).To(Equal(bigVal))
+ }
+ })
+ })
+
+ It("should handle big vals in Set", func() {
+ C, N = 4, 100
+
+ bigVal := bigVal()
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Set(ctx, "key", bigVal, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+ })
+
+ It("should select db", func() {
+ err := client.Set(ctx, "db", 1, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ perform(C, func(id int) {
+ opt := redisOptions()
+ opt.DB = id
+ client := redis.NewClient(opt)
+ for i := 0; i < N; i++ {
+ err := client.Set(ctx, "db", id, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.Get(ctx, "db").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(id)))
+ }
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ n, err := client.Get(ctx, "db").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ })
+
+ It("should select DB with read timeout", func() {
+ perform(C, func(id int) {
+ opt := redisOptions()
+ opt.DB = id
+ opt.ReadTimeout = time.Nanosecond
+ client := redis.NewClient(opt)
+
+ perform(C, func(id int) {
+ err := client.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+
+ It("should Watch/Unwatch", func() {
+ err := client.Set(ctx, "key", "0", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ val, err := tx.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(Equal(redis.Nil))
+
+ num, err := strconv.ParseInt(val, 10, 64)
+ Expect(err).NotTo(HaveOccurred())
+
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, "key", strconv.FormatInt(num+1, 10), 0)
+ return nil
+ })
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "key")
+ if err == redis.TxFailedErr {
+ i--
+ continue
+ }
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ val, err := client.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(C * N)))
+ })
+
+ It("should Pipeline", func() {
+ perform(C, func(id int) {
+ pipe := client.Pipeline()
+ for i := 0; i < N; i++ {
+ pipe.Echo(ctx, fmt.Sprint(i))
+ }
+
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(N))
+
+ for i := 0; i < N; i++ {
+ Expect(cmds[i].(*redis.StringCmd).Val()).To(Equal(fmt.Sprint(i)))
+ }
+ })
+ })
+
+ It("should Pipeline", func() {
+ pipe := client.Pipeline()
+ perform(N, func(id int) {
+ pipe.Incr(ctx, "key")
+ })
+
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(N))
+
+ n, err := client.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(N)))
+ })
+
+ It("should TxPipeline", func() {
+ pipe := client.TxPipeline()
+ perform(N, func(id int) {
+ pipe.Incr(ctx, "key")
+ })
+
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(N))
+
+ n, err := client.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(N)))
+ })
+
+ PIt("should BLPop", func() {
+ var received uint32
+
+ wg := performAsync(C, func(id int) {
+ for {
+ v, err := client.BLPop(ctx, 5*time.Second, "list").Result()
+ if err != nil {
+ if err == redis.Nil {
+ break
+ }
+ Expect(err).NotTo(HaveOccurred())
+ }
+ Expect(v).To(Equal([]string{"list", "hello"}))
+ atomic.AddUint32(&received, 1)
+ }
+ })
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.LPush(ctx, "list", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ wg.Wait()
+ Expect(atomic.LoadUint32(&received)).To(Equal(uint32(C * N)))
+ })
+
+ It("should WithContext", func() {
+ perform(C, func(_ int) {
+ err := client.WithContext(ctx).Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+
+ It("should abort on context timeout", func() {
+ opt := redisClusterOptions()
+ client := cluster.newClusterClient(ctx, opt)
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ wg := performAsync(C, func(_ int) {
+ _, err := client.XRead(ctx, &redis.XReadArgs{
+ Streams: []string{"test", "$"},
+ Block: 1 * time.Second,
+ }).Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Or(Equal(context.Canceled.Error()), ContainSubstring("operation was canceled")))
+ })
+
+ time.Sleep(10 * time.Millisecond)
+ cancel()
+ wg.Wait()
+ })
+})
+
+var _ = Describe("cluster races", func() {
+ var client *redis.ClusterClient
+ var C, N int
+
+ BeforeEach(func() {
+ opt := redisClusterOptions()
+ client = cluster.newClusterClient(ctx, opt)
+
+ C, N = 10, 1000
+ if testing.Short() {
+ C = 4
+ N = 100
+ }
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should echo", func() {
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ msg := fmt.Sprintf("echo %d %d", id, i)
+ echo, err := client.Echo(ctx, msg).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(echo).To(Equal(msg))
+ }
+ })
+ })
+
+ It("should get", func() {
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ key := fmt.Sprintf("key_%d_%d", id, i)
+ _, err := client.Get(ctx, key).Result()
+ Expect(err).To(Equal(redis.Nil))
+ }
+ })
+ })
+
+ It("should incr", func() {
+ key := "TestIncrFromGoroutines"
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Incr(ctx, key).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ val, err := client.Get(ctx, key).Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(C * N)))
+ })
+
+ It("write cmd data-race", func() {
+ pubsub := client.Subscribe(ctx)
+ defer pubsub.Close()
+
+ pubsub.Channel(redis.WithChannelHealthCheckInterval(time.Millisecond))
+ for i := 0; i < 100; i++ {
+ key := fmt.Sprintf("channel_%d", i)
+ pubsub.Subscribe(ctx, key)
+ pubsub.Unsubscribe(ctx, key)
+ }
+ })
+})
+
+func bigVal() []byte {
+ return bytes.Repeat([]byte{'*'}, 1<<17) // 128kb
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go
new file mode 100644
index 0000000..bcf8a2a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go
@@ -0,0 +1,773 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+// Nil is the reply returned by Redis when a key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+ AfterProcess(ctx context.Context, cmd Cmder) error
+
+ BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+ AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
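+
+// Illustrative sketch (not part of the original source): a minimal Hook that
+// logs how long each command takes. The timingHook and startKey names are
+// hypothetical and used only for this example; rdb is assumed.
+//
+//    type timingHook struct{}
+//
+//    type startKey struct{}
+//
+//    func (timingHook) BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
+//        return context.WithValue(ctx, startKey{}, time.Now()), nil
+//    }
+//
+//    func (timingHook) AfterProcess(ctx context.Context, cmd Cmder) error {
+//        if start, ok := ctx.Value(startKey{}).(time.Time); ok {
+//            internal.Logger.Printf(ctx, "%s took %s", cmd.Name(), time.Since(start))
+//        }
+//        return nil
+//    }
+//
+//    func (timingHook) BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
+//        return ctx, nil
+//    }
+//
+//    func (timingHook) AfterProcessPipeline(ctx context.Context, cmds []Cmder) error {
+//        return nil
+//    }
+//
+//    // rdb.AddHook(timingHook{})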
+
+type hooks struct {
+ hooks []Hook
+}
+
+// lock re-slices hooks so that len == cap; a later AddHook on a clone then
+// allocates a new backing array instead of mutating the shared slice.
+func (hs *hooks) lock() {
+ hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) clone() hooks {
+ clone := hs
+ clone.lock()
+ return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+ hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+ ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+ if len(hs.hooks) == 0 {
+ err := fn(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+ }
+
+ var hookIndex int
+ var retErr error
+
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+ if retErr != nil {
+ cmd.SetErr(retErr)
+ }
+ }
+
+ if retErr == nil {
+ retErr = fn(ctx, cmd)
+ cmd.SetErr(retErr)
+ }
+
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+ retErr = err
+ cmd.SetErr(retErr)
+ }
+ }
+
+ return retErr
+}
+
+func (hs hooks) processPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ if len(hs.hooks) == 0 {
+ err := fn(ctx, cmds)
+ return err
+ }
+
+ var hookIndex int
+ var retErr error
+
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+ if retErr != nil {
+ setCmdsErr(cmds, retErr)
+ }
+ }
+
+ if retErr == nil {
+ retErr = fn(ctx, cmds)
+ }
+
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+ retErr = err
+ setCmdsErr(cmds, retErr)
+ }
+ }
+
+ return retErr
+}
+
+func (hs hooks) processTxPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return hs.processPipeline(ctx, cmds, fn)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+ return &baseClient{
+ opt: opt,
+ connPool: connPool,
+ }
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ if err := c.initConn(ctx, cn); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ if c.opt.Password == "" &&
+ c.opt.DB == 0 &&
+ !c.opt.readOnly &&
+ c.opt.OnConnect == nil {
+ return nil
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(ctx, c.opt, connPool)
+
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if c.opt.Password != "" {
+ if c.opt.Username != "" {
+ pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
+ } else {
+ pipe.Auth(ctx, c.opt.Password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false, c.opt.Addr) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ c.releaseConn(ctx, cn, err)
+ }()
+
+ done := ctx.Done() //nolint:ifshort
+
+ if done == nil {
+ err = fn(ctx, cn)
+ return err
+ }
+
+ errc := make(chan error, 1)
+ go func() { errc <- fn(ctx, cn) }()
+
+ select {
+ case <-done:
+ _ = cn.Close()
+ // Wait for the goroutine to finish and send something.
+ <-errc
+
+ err = ctx.Err()
+ return err
+ case err = <-errc:
+ return err
+ }
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ retry, err := c._process(ctx, cmd, attempt)
+ if err == nil || !retry {
+ return err
+ }
+
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(1)
+ err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
+ return err
+ }
+
+ err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+ if err != nil {
+ // Only treat a timeout as retryable when the command has no explicit
+ // read timeout of its own (i.e. it is not a blocking command).
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ })
+ if err == nil {
+ return false, nil
+ }
+
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ err := c._generalProcessPipeline(ctx, cmds, p)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ var canRetry bool
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ })
+ return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := txPipelineReadQueued(rd, statusCmd, cmds)
+ if err != nil {
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ return false, err
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdCopy := make([]Cmder, len(cmds)+2)
+ cmdCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdCopy[1:], cmds)
+ cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdCopy
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+ *baseClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: newBaseClient(opt, newConnPool(opt)),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+
+ return &c
+}
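For context, a minimal usage sketch of the constructor above (not part of the vendored diff); the address and database number are placeholders for whatever server the client is pointed at:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	rdb := redis.NewClient(&redis.Options{
		Addr: "localhost:6379", // assumed local server
		DB:   0,
	})
	defer rdb.Close()

	// Ping verifies that the pool can reach the server.
	if err := rdb.Ping(ctx).Err(); err != nil {
		fmt.Println("ping failed:", err)
		return
	}
	fmt.Println("connected")
}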
+
+func (c *Client) clone() *Client {
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := c.clone()
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ return clone
+}
+
+func (c *Client) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := c.clone()
+ clone.ctx = ctx
+ return clone
+}
+
+func (c *Client) Conn(ctx context.Context) *Conn {
+ return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
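A short, illustrative sketch of the transactional pipeline from application code, assuming a reachable server at a placeholder address; the key name is made up. The MULTI/EXEC wrapping shown earlier in wrapMultiExec happens transparently, and the returned slice contains only the queued commands:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer rdb.Close()

	// Commands queued inside the callback are sent as one MULTI/EXEC block.
	cmds, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "tx:counter")              // illustrative key
		pipe.Expire(ctx, "tx:counter", time.Hour) // queued in the same transaction
		return nil
	})
	if err != nil {
		fmt.Println("tx pipeline failed:", err)
		return
	}
	// Only the queued commands are returned; MULTI and EXEC are trimmed.
	fmt.Println("executed", len(cmds), "queued commands")
}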
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
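A hedged usage sketch for the pub/sub entry points above; the channel name and address are illustrative. Receive is called first to confirm the subscription before anything is published:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer rdb.Close()

	pubsub := rdb.Subscribe(ctx, "mychannel") // illustrative channel name
	defer pubsub.Close()

	// Receive blocks until the subscription is confirmed by the server.
	if _, err := pubsub.Receive(ctx); err != nil {
		fmt.Println("subscribe failed:", err)
		return
	}

	ch := pubsub.Channel()

	if err := rdb.Publish(ctx, "mychannel", "hello").Err(); err != nil {
		fmt.Println("publish failed:", err)
		return
	}

	msg := <-ch
	fmt.Println(msg.Channel, msg.Payload)
}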
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks // TODO: inherit hooks
+}
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+ *conn
+ ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ conn: &conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ },
+ ctx: ctx,
+ }
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
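An illustrative sketch of Conn for stateful commands, assuming a placeholder address; CLIENT SETNAME only makes sense on a pinned connection, which is exactly what Conn provides:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer rdb.Close()

	// Conn pins a single pooled connection, which stateful commands
	// such as CLIENT SETNAME need.
	conn := rdb.Conn(ctx)
	defer conn.Close()

	if err := conn.ClientSetName(ctx, "worker-1").Err(); err != nil {
		fmt.Println("setname failed:", err)
		return
	}

	name, err := conn.ClientGetName(ctx).Result()
	if err != nil {
		fmt.Println("getname failed:", err)
		return
	}
	fmt.Println("connection name:", name)
}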
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go
new file mode 100644
index 0000000..095da2d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go
@@ -0,0 +1,449 @@
+package redis_test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "net"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+type redisHookError struct {
+ redis.Hook
+}
+
+var _ redis.Hook = redisHookError{}
+
+func (redisHookError) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ return ctx, nil
+}
+
+func (redisHookError) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
+ return errors.New("hook error")
+}
+
+func TestHookError(t *testing.T) {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ })
+ rdb.AddHook(redisHookError{})
+
+ err := rdb.Ping(ctx).Err()
+ if err == nil {
+ t.Fatalf("got nil, expected an error")
+ }
+
+ wanted := "hook error"
+ if err.Error() != wanted {
+ t.Fatalf(`got %q, wanted %q`, err, wanted)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+var _ = Describe("Client", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ client.Close()
+ })
+
+ It("should Stringer", func() {
+ Expect(client.String()).To(Equal("Redis<:6380 db:15>"))
+ })
+
+ It("supports context", func() {
+ ctx, cancel := context.WithCancel(ctx)
+ cancel()
+
+ err := client.Ping(ctx).Err()
+ Expect(err).To(MatchError("context canceled"))
+ })
+
+ It("supports WithTimeout", func() {
+ err := client.ClientPause(ctx, time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.WithTimeout(10 * time.Millisecond).Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+
+ err = client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should ping", func() {
+ val, err := client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ })
+
+ It("should return pool stats", func() {
+ Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
+ })
+
+ It("should support custom dialers", func() {
+ custom := redis.NewClient(&redis.Options{
+ Network: "tcp",
+ Addr: redisAddr,
+ Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ var d net.Dialer
+ return d.DialContext(ctx, network, addr)
+ },
+ })
+
+ val, err := custom.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ Expect(custom.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should close", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ err := client.Ping(ctx).Err()
+ Expect(err).To(MatchError("redis: client is closed"))
+ })
+
+ It("should close pubsub without closing the client", func() {
+ pubsub := client.Subscribe(ctx)
+ Expect(pubsub.Close()).NotTo(HaveOccurred())
+
+ _, err := pubsub.Receive(ctx)
+ Expect(err).To(MatchError("redis: client is closed"))
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("should close Tx without closing the client", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("should close pipeline without closing the client", func() {
+ pipeline := client.Pipeline()
+ Expect(pipeline.Close()).NotTo(HaveOccurred())
+
+ pipeline.Ping(ctx)
+ _, err := pipeline.Exec(ctx)
+ Expect(err).To(MatchError("redis: client is closed"))
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("should close pubsub when client is closed", func() {
+ pubsub := client.Subscribe(ctx)
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ _, err := pubsub.Receive(ctx)
+ Expect(err).To(MatchError("redis: client is closed"))
+
+ Expect(pubsub.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should close pipeline when client is closed", func() {
+ pipeline := client.Pipeline()
+ Expect(client.Close()).NotTo(HaveOccurred())
+ Expect(pipeline.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should select DB", func() {
+ db2 := redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ DB: 2,
+ })
+ Expect(db2.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ Expect(db2.Get(ctx, "db").Err()).To(Equal(redis.Nil))
+ Expect(db2.Set(ctx, "db", 2, 0).Err()).NotTo(HaveOccurred())
+
+ n, err := db2.Get(ctx, "db").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+
+ Expect(client.Get(ctx, "db").Err()).To(Equal(redis.Nil))
+
+ Expect(db2.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ Expect(db2.Close()).NotTo(HaveOccurred())
+ })
+
+ It("processes custom commands", func() {
+ cmd := redis.NewCmd(ctx, "PING")
+ _ = client.Process(ctx, cmd)
+
+ // Flush buffers.
+ Expect(client.Echo(ctx, "hello").Err()).NotTo(HaveOccurred())
+
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal("PONG"))
+ })
+
+ It("should retry command on network error", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ client = redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ MaxRetries: 1,
+ })
+
+ // Put bad connection in the pool.
+ cn, err := client.Pool().Get(ctx)
+ Expect(err).NotTo(HaveOccurred())
+
+ cn.SetNetConn(&badConn{})
+ client.Pool().Put(ctx, cn)
+
+ err = client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should retry with backoff", func() {
+ clientNoRetry := redis.NewClient(&redis.Options{
+ Addr: ":1234",
+ MaxRetries: -1,
+ })
+ defer clientNoRetry.Close()
+
+ clientRetry := redis.NewClient(&redis.Options{
+ Addr: ":1234",
+ MaxRetries: 5,
+ MaxRetryBackoff: 128 * time.Millisecond,
+ })
+ defer clientRetry.Close()
+
+ startNoRetry := time.Now()
+ err := clientNoRetry.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+ elapseNoRetry := time.Since(startNoRetry)
+
+ startRetry := time.Now()
+ err = clientRetry.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+ elapseRetry := time.Since(startRetry)
+
+ Expect(elapseRetry).To(BeNumerically(">", elapseNoRetry, 10*time.Millisecond))
+ })
+
+ It("should update conn.UsedAt on read/write", func() {
+ cn, err := client.Pool().Get(context.Background())
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn.UsedAt).NotTo(BeZero())
+
+ // Simulate the last time the connection was used by rewinding it with
+ // cn.SetUsedAt instead of sleeping for over a second; time.Sleep is not
+ // precise enough for this on all platforms.
+ // See: https://go-review.googlesource.com/c/go/+/232298
+ cn.SetUsedAt(time.Now().Add(-1 * time.Second))
+ createdAt := cn.UsedAt()
+
+ client.Pool().Put(ctx, cn)
+ Expect(cn.UsedAt().Equal(createdAt)).To(BeTrue())
+
+ err = client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ cn, err = client.Pool().Get(context.Background())
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn).NotTo(BeNil())
+ Expect(cn.UsedAt().After(createdAt)).To(BeTrue())
+ })
+
+ It("should process command with special chars", func() {
+ set := client.Set(ctx, "key", "hello1\r\nhello2\r\n", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get := client.Get(ctx, "key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello1\r\nhello2\r\n"))
+ })
+
+ It("should handle big vals", func() {
+ bigVal := bytes.Repeat([]byte{'*'}, 2e6)
+
+ err := client.Set(ctx, "key", bigVal, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Reconnect to get new connection.
+ Expect(client.Close()).NotTo(HaveOccurred())
+ client = redis.NewClient(redisOptions())
+
+ got, err := client.Get(ctx, "key").Bytes()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(got).To(Equal(bigVal))
+ })
+
+ It("should set and scan time", func() {
+ tm := time.Now()
+ err := client.Set(ctx, "now", tm, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var tm2 time.Time
+ err = client.Get(ctx, "now").Scan(&tm2)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(tm2).To(BeTemporally("==", tm))
+ })
+
+ It("should set and scan durations", func() {
+ duration := 10 * time.Minute
+ err := client.Set(ctx, "duration", duration, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var duration2 time.Duration
+ err = client.Get(ctx, "duration").Scan(&duration2)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(duration2).To(Equal(duration))
+ })
+
+ It("should Conn", func() {
+ err := client.Conn(ctx).Get(ctx, "this-key-does-not-exist").Err()
+ Expect(err).To(Equal(redis.Nil))
+ })
+})
+
+var _ = Describe("Client timeout", func() {
+ var opt *redis.Options
+ var client *redis.Client
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ testTimeout := func() {
+ It("Ping timeouts", func() {
+ err := client.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Pipeline timeouts", func() {
+ _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Subscribe timeouts", func() {
+ if opt.WriteTimeout == 0 {
+ return
+ }
+
+ pubsub := client.Subscribe(ctx)
+ defer pubsub.Close()
+
+ err := pubsub.Subscribe(ctx, "_")
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx timeouts", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ return tx.Ping(ctx).Err()
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx Pipeline timeouts", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ return err
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+ }
+
+ Context("read timeout", func() {
+ BeforeEach(func() {
+ opt = redisOptions()
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = -1
+ client = redis.NewClient(opt)
+ })
+
+ testTimeout()
+ })
+
+ Context("write timeout", func() {
+ BeforeEach(func() {
+ opt = redisOptions()
+ opt.ReadTimeout = -1
+ opt.WriteTimeout = time.Nanosecond
+ client = redis.NewClient(opt)
+ })
+
+ testTimeout()
+ })
+})
+
+var _ = Describe("Client OnConnect", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ opt := redisOptions()
+ opt.DB = 0
+ opt.OnConnect = func(ctx context.Context, cn *redis.Conn) error {
+ return cn.ClientSetName(ctx, "on_connect").Err()
+ }
+
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("calls OnConnect", func() {
+ name, err := client.ClientGetName(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(name).To(Equal("on_connect"))
+ })
+})
+
+var _ = Describe("Client context cancelation", func() {
+ var opt *redis.Options
+ var client *redis.Client
+
+ BeforeEach(func() {
+ opt = redisOptions()
+ opt.ReadTimeout = -1
+ opt.WriteTimeout = -1
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("Blocking operation cancelation", func() {
+ ctx, cancel := context.WithCancel(ctx)
+ cancel()
+
+ err := client.BLPop(ctx, 1*time.Second, "test").Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(BeIdenticalTo(context.Canceled))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go
new file mode 100644
index 0000000..24cfd49
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go
@@ -0,0 +1,180 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+ var cmd StringStringMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+ var cmd StringIntMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+ var cmd TimeCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+ var cmd ZWithKeyCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+ var cmd GeoPosCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+ var cmd XMessageSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+ var cmd XStreamSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
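These constructors exist so tests can fabricate command results without a running server. A small sketch, assuming a hypothetical narrow interface named getter that application code might accept instead of *redis.Client:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

// getter is a hypothetical narrow interface that production code could depend
// on instead of *redis.Client, so tests can substitute canned results.
type getter interface {
	Val() string
	Err() error
}

func describe(cmd getter) string {
	if err := cmd.Err(); err != nil {
		return "error: " + err.Error()
	}
	return "value: " + cmd.Val()
}

func main() {
	// Stub a successful GET and a failed one without touching a server.
	ok := redis.NewStringResult("hello", nil)
	bad := redis.NewStringResult("", redis.Nil)

	fmt.Println(describe(ok))
	fmt.Println(describe(bad))
}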
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go
new file mode 100644
index 0000000..4df00fc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go
@@ -0,0 +1,736 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ rendezvous "github.com/dgryski/go-rendezvous" //nolint
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
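A sketch of plugging a custom hash into RingOptions.NewConsistentHash via the ConsistentHash interface above. The FNV-modulo hash and shard addresses are purely illustrative; the default rendezvous hash remains the better choice because it moves far fewer keys when shards come and go:

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/go-redis/redis/v8"
)

// modHash is a deliberately naive ConsistentHash: it maps a key to a shard by
// FNV-1a hash modulo the shard count. It is only here to show the plumbing.
type modHash struct {
	shards []string
}

func (h modHash) Get(key string) string {
	if len(h.shards) == 0 {
		return ""
	}
	f := fnv.New32a()
	_, _ = f.Write([]byte(key))
	return h.shards[int(f.Sum32())%len(h.shards)]
}

func main() {
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{ // placeholder shard addresses
			"shard1": "localhost:6380",
			"shard2": "localhost:6381",
		},
		NewConsistentHash: func(shards []string) redis.ConsistentHash {
			return modHash{shards: shards}
		},
	})
	defer ring.Close()

	fmt.Println("shards in ring:", ring.Len())
}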
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // NewClient creates a shard client with provided name and options.
+ NewClient func(name string, opt *Options) *Client
+
+ // Frequency of PING commands sent to check shard availability.
+ // A shard is considered down after 3 consecutive failed checks.
+ HeartbeatFrequency time.Duration
+
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
+ //
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
+}
+
+func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(name string, opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+ DB: opt.DB,
+
+ MaxRetries: -1,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+}
+
+func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(name, clopt),
+ }
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ hash ConsistentHash
+ shards map[string]*ringShard // read only
+ list []*ringShard // read only
+ numShard int
+ closed bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+ shards := make(map[string]*ringShard, len(opt.Addrs))
+ list := make([]*ringShard, 0, len(shards))
+
+ for name, addr := range opt.Addrs {
+ shard := newRingShard(opt, name, addr)
+ shards[name] = shard
+
+ list = append(list, shard)
+ }
+
+ c := &ringShards{
+ opt: opt,
+
+ shards: shards,
+ list: list,
+ }
+ c.rebalance()
+
+ return c
+}
+
+func (c *ringShards) List() []*ringShard {
+ var list []*ringShard
+
+ c.mu.RLock()
+ if !c.closed {
+ list = c.list
+ }
+ c.mu.RUnlock()
+
+ return list
+}
+
+func (c *ringShards) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
+ c.mu.RLock()
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
+ c.mu.RUnlock()
+
+ return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ return nil, pool.ErrClosed
+ }
+
+ if c.numShard == 0 {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ hash := c.hash.Get(key)
+ if hash == "" {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ shard := c.shards[hash]
+ c.mu.RUnlock()
+
+ return shard, nil
+}
+
+func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ shard := c.shards[shardName]
+ c.mu.RUnlock()
+ return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ ctx := context.Background()
+ for range ticker.C {
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.rebalance()
+ }
+ }
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *ringShards) rebalance() {
+ c.mu.RLock()
+ shards := c.shards
+ c.mu.RUnlock()
+
+ liveShards := make([]string, 0, len(shards))
+
+ for name, shard := range shards {
+ if shard.IsUp() {
+ liveShards = append(liveShards, name)
+ }
+ }
+
+ hash := c.opt.NewConsistentHash(liveShards)
+
+ c.mu.Lock()
+ c.hash = hash
+ c.numShard = len(liveShards)
+ c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+ c.mu.RLock()
+ l := c.numShard
+ c.mu.RUnlock()
+ return l
+}
+
+func (c *ringShards) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, shard := range c.shards {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ c.hash = nil
+ c.shards = nil
+ c.list = nil
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+ opt *RingOptions
+ shards *ringShards
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ *ring
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ opt.init()
+
+ ring := Ring{
+ ring: &ring{
+ opt: opt,
+ shards: newRingShards(opt),
+ },
+ ctx: context.Background(),
+ }
+
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+ return &ring
+}
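A minimal Ring usage sketch, assuming two placeholder shard addresses; keys are routed by consistent hashing, so the caller never selects a shard explicitly:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{ // placeholder shard addresses
			"shard1": "localhost:6380",
			"shard2": "localhost:6381",
		},
	})
	defer ring.Close()

	// The ring picks a shard for each key by consistent hashing.
	if err := ring.Set(ctx, "key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	fmt.Println(ring.Get(ctx, "key").Val())
}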
+
+func (c *Ring) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ shards := c.shards.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// ForEachShard concurrently calls fn on each live shard in the ring.
+// It returns the first error, if any.
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ shards := c.shards.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(ctx, shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ shards := c.shards.List()
+ var firstErr error
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+ if err != nil {
+ return nil
+ }
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ pos := cmdFirstKeyPos(cmd, cmdInfo)
+ if pos == 0 {
+ return c.shards.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(ctx, cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, false)
+ })
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, true)
+ })
+}
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ cmdsMap := make(map[string][]Cmder)
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ if hash != "" {
+ hash = c.shards.Hash(hash)
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ _ = c.processShardPipeline(ctx, hash, cmds, tx)
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+ ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+ // TODO: retry?
+ shard, err := c.shards.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ if tx {
+ return shard.Client.processTxPipeline(ctx, cmds)
+ }
+ return shard.Client.processPipeline(ctx, cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.shards.GetByKey(hashtag.Key(key))
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(ctx, fn, keys...)
+}
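An illustrative Watch sketch against a Ring with a placeholder shard; the {user:1} hash tag keeps both keys on one shard, which the check above requires:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{"shard1": "localhost:6380"}, // placeholder shard
	})
	defer ring.Close()

	// The {user:1} hash tag keeps both keys on the same shard,
	// which Watch requires.
	err := ring.Watch(ctx, func(tx *redis.Tx) error {
		_, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, "{user:1}name", "alice", 0)
			pipe.Set(ctx, "{user:1}email", "alice@example.com", 0)
			return nil
		})
		return err
	}, "{user:1}name", "{user:1}email")
	fmt.Println("watch result:", err)
}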
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ return c.shards.Close()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go
new file mode 100644
index 0000000..03a49fd
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go
@@ -0,0 +1,645 @@
+package redis_test
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("Redis Ring", func() {
+ const heartbeat = 100 * time.Millisecond
+
+ var ring *redis.Ring
+
+ setRingKeys := func() {
+ for i := 0; i < 100; i++ {
+ err := ring.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ }
+
+ BeforeEach(func() {
+ opt := redisRingOptions()
+ opt.HeartbeatFrequency = heartbeat
+ ring = redis.NewRing(opt)
+
+ err := ring.ForEachShard(ctx, func(ctx context.Context, cl *redis.Client) error {
+ return cl.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("supports context", func() {
+ ctx, cancel := context.WithCancel(ctx)
+ cancel()
+
+ err := ring.Ping(ctx).Err()
+ Expect(err).To(MatchError("context canceled"))
+ })
+
+ It("distributes keys", func() {
+ setRingKeys()
+
+ // Both shards should have some keys now.
+ Expect(ringShard1.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=56"))
+ Expect(ringShard2.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=44"))
+ })
+
+ It("distributes keys when using EVAL", func() {
+ script := redis.NewScript(`
+ local r = redis.call('SET', KEYS[1], ARGV[1])
+ return r
+ `)
+
+ var key string
+ for i := 0; i < 100; i++ {
+ key = fmt.Sprintf("key%d", i)
+ err := script.Run(ctx, ring, []string{key}, "value").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(ringShard1.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=56"))
+ Expect(ringShard2.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=44"))
+ })
+
+ It("uses single shard when one of the shards is down", func() {
+ // Stop ringShard2.
+ Expect(ringShard2.Close()).NotTo(HaveOccurred())
+
+ Eventually(func() int {
+ return ring.Len()
+ }, "30s").Should(Equal(1))
+
+ setRingKeys()
+
+ // RingShard1 should have all keys.
+ Expect(ringShard1.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=100"))
+
+ // Start ringShard2.
+ var err error
+ ringShard2, err = startRedis(ringShard2Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(func() int {
+ return ring.Len()
+ }, "30s").Should(Equal(2))
+
+ setRingKeys()
+
+ // RingShard2 should have its keys.
+ Expect(ringShard2.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=44"))
+ })
+
+ It("supports hash tags", func() {
+ for i := 0; i < 100; i++ {
+ err := ring.Set(ctx, fmt.Sprintf("key%d{tag}", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(ringShard1.Info(ctx, "keyspace").Val()).ToNot(ContainSubstring("keys="))
+ Expect(ringShard2.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=100"))
+ })
+
+ Describe("pipeline", func() {
+ It("distributes keys", func() {
+ pipe := ring.Pipeline()
+ for i := 0; i < 100; i++ {
+ err := pipe.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ cmds, err := pipe.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(100))
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+
+ for _, cmd := range cmds {
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.(*redis.StatusCmd).Val()).To(Equal("OK"))
+ }
+
+ // Both shards should have some keys now.
+ Expect(ringShard1.Info(ctx).Val()).To(ContainSubstring("keys=56"))
+ Expect(ringShard2.Info(ctx).Val()).To(ContainSubstring("keys=44"))
+ })
+
+ It("is consistent with ring", func() {
+ var keys []string
+ for i := 0; i < 100; i++ {
+ key := make([]byte, 64)
+ _, err := rand.Read(key)
+ Expect(err).NotTo(HaveOccurred())
+ keys = append(keys, string(key))
+ }
+
+ _, err := ring.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ for _, key := range keys {
+ pipe.Set(ctx, key, "value", 0).Err()
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ for _, key := range keys {
+ val, err := ring.Get(ctx, key).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("value"))
+ }
+ })
+
+ It("supports hash tags", func() {
+ _, err := ring.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ for i := 0; i < 100; i++ {
+ pipe.Set(ctx, fmt.Sprintf("key%d{tag}", i), "value", 0).Err()
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ringShard1.Info(ctx).Val()).ToNot(ContainSubstring("keys="))
+ Expect(ringShard2.Info(ctx).Val()).To(ContainSubstring("keys=100"))
+ })
+ })
+
+ Describe("new client callback", func() {
+ It("can be initialized with a new client callback", func() {
+ opts := redisRingOptions()
+ opts.NewClient = func(name string, opt *redis.Options) *redis.Client {
+ opt.Password = "password1"
+ return redis.NewClient(opt)
+ }
+ ring = redis.NewRing(opts)
+
+ err := ring.Ping(ctx).Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("ERR AUTH"))
+ })
+ })
+
+ Describe("Process hook", func() {
+ BeforeEach(func() {
+ // The heartbeat health check would race with the "stack []string"
+ // variable below, so the heartbeat frequency is set to 72 hours to
+ // effectively disable it during this test.
+ opt := redisRingOptions()
+ opt.HeartbeatFrequency = 72 * time.Hour
+ ring = redis.NewRing(opt)
+ })
+ It("supports Process hook", func() {
+ err := ring.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var stack []string
+
+ ring.AddHook(&hook{
+ beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcess")
+ return ctx, nil
+ },
+ afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcess")
+ return nil
+ },
+ })
+
+ ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+ shard.AddHook(&hook{
+ beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcess")
+ return ctx, nil
+ },
+ afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcess")
+ return nil
+ },
+ })
+ return nil
+ })
+
+ err = ring.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(stack).To(Equal([]string{
+ "ring.BeforeProcess",
+ "shard.BeforeProcess",
+ "shard.AfterProcess",
+ "ring.AfterProcess",
+ }))
+ })
+
+ It("supports Pipeline hook", func() {
+ err := ring.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var stack []string
+
+ ring.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcessPipeline")
+ return nil
+ },
+ })
+
+ ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+ shard.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+ return nil
+ },
+ })
+ return nil
+ })
+
+ _, err = ring.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(stack).To(Equal([]string{
+ "ring.BeforeProcessPipeline",
+ "shard.BeforeProcessPipeline",
+ "shard.AfterProcessPipeline",
+ "ring.AfterProcessPipeline",
+ }))
+ })
+
+ It("supports TxPipeline hook", func() {
+ err := ring.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var stack []string
+
+ ring.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcessPipeline")
+ return nil
+ },
+ })
+
+ ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+ shard.AddHook(&hook{
+ beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+ return ctx, nil
+ },
+ afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+ return nil
+ },
+ })
+ return nil
+ })
+
+ _, err = ring.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(stack).To(Equal([]string{
+ "ring.BeforeProcessPipeline",
+ "shard.BeforeProcessPipeline",
+ "shard.AfterProcessPipeline",
+ "ring.AfterProcessPipeline",
+ }))
+ })
+ })
+})
+
+var _ = Describe("empty Redis Ring", func() {
+ var ring *redis.Ring
+
+ BeforeEach(func() {
+ ring = redis.NewRing(&redis.RingOptions{})
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("returns an error", func() {
+ err := ring.Ping(ctx).Err()
+ Expect(err).To(MatchError("redis: all ring shards are down"))
+ })
+
+ It("pipeline returns an error", func() {
+ _, err := ring.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).To(MatchError("redis: all ring shards are down"))
+ })
+})
+
+var _ = Describe("Ring watch", func() {
+ const heartbeat = 100 * time.Millisecond
+
+ var ring *redis.Ring
+
+ BeforeEach(func() {
+ opt := redisRingOptions()
+ opt.HeartbeatFrequency = heartbeat
+ ring = redis.NewRing(opt)
+
+ err := ring.ForEachShard(ctx, func(ctx context.Context, cl *redis.Client) error {
+ return cl.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should Watch", func() {
+ var incr func(string) error
+
+ // Transactionally increments key using GET and SET commands.
+ incr = func(key string) error {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ n, err := tx.Get(ctx, key).Int64()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, key, strconv.FormatInt(n+1, 10), 0)
+ return nil
+ })
+ return err
+ }, key)
+ if err == redis.TxFailedErr {
+ return incr(key)
+ }
+ return err
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ err := incr("key")
+ Expect(err).NotTo(HaveOccurred())
+ }()
+ }
+ wg.Wait()
+
+ n, err := ring.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(100)))
+ })
+
+ It("should discard", func() {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, "{shard}key1", "hello1", 0)
+ pipe.Discard()
+ pipe.Set(ctx, "{shard}key2", "hello2", 0)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "{shard}key1", "{shard}key2")
+ Expect(err).NotTo(HaveOccurred())
+
+ get := ring.Get(ctx, "{shard}key1")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ get = ring.Get(ctx, "{shard}key2")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello2"))
+ })
+
+ It("returns no error when there are no commands", func() {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(redis.Pipeliner) error { return nil })
+ return err
+ }, "key")
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := ring.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("PONG"))
+ })
+
+ It("should exec bulks", func() {
+ const N = 20000
+
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ for i := 0; i < N; i++ {
+ pipe.Incr(ctx, "key")
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cmds)).To(Equal(N))
+ for _, cmd := range cmds {
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ return err
+ }, "key")
+ Expect(err).NotTo(HaveOccurred())
+
+ num, err := ring.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(N)))
+ })
+
+ It("should Watch/Unwatch", func() {
+ var C, N int
+
+ err := ring.Set(ctx, "key", "0", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ val, err := tx.Get(ctx, "key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(Equal(redis.Nil))
+
+ num, err := strconv.ParseInt(val, 10, 64)
+ Expect(err).NotTo(HaveOccurred())
+
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, "key", strconv.FormatInt(num+1, 10), 0)
+ return nil
+ })
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "key")
+ if err == redis.TxFailedErr {
+ i--
+ continue
+ }
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ val, err := ring.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(C * N)))
+ })
+
+ It("should close Tx without closing the client", func() {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ return err
+ }, "key")
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ring.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("respects max size on multi", func() {
+ // This test checks the number of pooled connections. If the heartbeat
+ // health check ran at the same time, it would use connections itself
+ // and skew the count.
+ //
+ // redis.NewRing() has no option to disable health checks, so a very
+ // large heartbeat frequency is used here to effectively avoid them.
+ opt := redisRingOptions()
+ opt.HeartbeatFrequency = 72 * time.Hour
+ ring = redis.NewRing(opt)
+
+ perform(1000, func(id int) {
+ var ping *redis.StatusCmd
+
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ ping = pipe.Ping(ctx)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "key")
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ })
+
+ ring.ForEachShard(ctx, func(ctx context.Context, cl *redis.Client) error {
+ defer GinkgoRecover()
+
+ pool := cl.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.IdleLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.IdleLen()))
+
+ return nil
+ })
+ })
+})
+
+var _ = Describe("Ring Tx timeout", func() {
+ const heartbeat = 100 * time.Millisecond
+
+ var ring *redis.Ring
+
+ AfterEach(func() {
+ _ = ring.Close()
+ })
+
+ testTimeout := func() {
+ It("Tx timeouts", func() {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ return tx.Ping(ctx).Err()
+ }, "foo")
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx Pipeline timeouts", func() {
+ err := ring.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ return err
+ }, "foo")
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+ }
+
+ const pause = 5 * time.Second
+
+ Context("read/write timeout", func() {
+ BeforeEach(func() {
+ opt := redisRingOptions()
+ opt.ReadTimeout = 250 * time.Millisecond
+ opt.WriteTimeout = 250 * time.Millisecond
+ opt.HeartbeatFrequency = heartbeat
+ ring = redis.NewRing(opt)
+
+ err := ring.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
+ return client.ClientPause(ctx, pause).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
+ defer GinkgoRecover()
+ Eventually(func() error {
+ return client.Ping(ctx).Err()
+ }, 2*pause).ShouldNot(HaveOccurred())
+ return nil
+ })
+ })
+
+ testTimeout()
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go
new file mode 100644
index 0000000..5cab18d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go
@@ -0,0 +1,65 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type Scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ Scripter = (*Client)(nil)
+ _ Scripter = (*Ring)(nil)
+ _ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
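
Not part of script.go — a minimal usage sketch of the Script helper. The server address, key name, and Lua body are assumptions for illustration; the point is that Run first tries EVALSHA and transparently falls back to EVAL on a NOSCRIPT error, which also caches the script on the server.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy increments a key by the given amount and returns the new value.
var incrBy = redis.NewScript(`
local n = redis.call("INCRBY", KEYS[1], ARGV[1])
return n
`)

func main() {
	ctx := context.Background()
	// Assumed local Redis server.
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// Run uses EVALSHA with the script's SHA1; if the script is not cached
	// yet it retries with EVAL, so no explicit SCRIPT LOAD is needed.
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 5).Int()
	if err != nil {
		panic(err)
	}
	fmt.Println("counter =", n)
}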
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh
new file mode 100644
index 0000000..f294c4f
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh
@@ -0,0 +1,9 @@
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
+ | sed 's/^\.\///' \
+ | sort)
+
+for dir in $PACKAGE_DIRS
+do
+ printf "${dir}: go get -d && go mod tidy\n"
+ (cd ./${dir} && go get -d && go mod tidy)
+done
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh
new file mode 100644
index 0000000..2e78be6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+set -e
+
+help() {
+ cat <<- EOF
+Usage: TAG=tag $0
+
+Updates the version in go.mod files and pushes a new branch to GitHub.
+
+VARIABLES:
+ TAG git tag, for example, v1.0.0
+EOF
+ exit 0
+}
+
+if [ -z "$TAG" ]
+then
+ printf "TAG is required\n\n"
+ help
+fi
+
+TAG_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
+if ! [[ "${TAG}" =~ ${TAG_REGEX} ]]; then
+ printf "TAG is not valid: ${TAG}\n\n"
+ exit 1
+fi
+
+TAG_FOUND=`git tag --list ${TAG}`
+if [[ ${TAG_FOUND} = ${TAG} ]] ; then
+ printf "tag ${TAG} already exists\n\n"
+ exit 1
+fi
+
+if ! git diff --quiet
+then
+ printf "working tree is not clean\n\n"
+ git status
+ exit 1
+fi
+
+git checkout master
+
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
+ | sed 's/^\.\///' \
+ | sort)
+
+for dir in $PACKAGE_DIRS
+do
+ printf "${dir}: go get -u && go mod tidy\n"
+ (cd ./${dir} && go get -u && go mod tidy)
+done
+
+for dir in $PACKAGE_DIRS
+do
+ sed --in-place \
+ "s/go-redis\/redis\([^ ]*\) v.*/go-redis\/redis\1 ${TAG}/" "${dir}/go.mod"
+ (cd ./${dir} && go get -u && go mod tidy)
+done
+
+sed --in-place "s/\(return \)\"[^\"]*\"/\1\"${TAG#v}\"/" ./version.go
+sed --in-place "s/\(\"version\": \)\"[^\"]*\"/\1\"${TAG#v}\"/" ./package.json
+
+conventional-changelog -p angular -i CHANGELOG.md -s
+
+git checkout -b release/${TAG} master
+git add -u
+git commit -m "chore: release $TAG (release.sh)"
+git push origin release/${TAG}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh
new file mode 100644
index 0000000..121f00e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+help() {
+ cat <<- EOF
+Usage: TAG=tag $0
+
+Creates git tags for public Go packages.
+
+VARIABLES:
+ TAG git tag, for example, v1.0.0
+EOF
+ exit 0
+}
+
+if [ -z "$TAG" ]
+then
+ printf "TAG env var is required\n\n";
+ help
+fi
+
+if ! grep -Fq "\"${TAG#v}\"" version.go
+then
+ printf "version.go does not contain ${TAG#v}\n"
+ exit 1
+fi
+
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
+ | grep -E -v "example|internal" \
+ | sed 's/^\.\///' \
+ | sort)
+
+git tag ${TAG}
+git push origin ${TAG}
+
+for dir in $PACKAGE_DIRS
+do
+ printf "tagging ${dir}/${TAG}\n"
+ git tag ${dir}/${TAG}
+ git push origin ${dir}/${TAG}
+done
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go
new file mode 100644
index 0000000..ec6221d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go
@@ -0,0 +1,796 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+
+ // If specified with SentinelPassword, enables ACL-based authentication (via
+ // AUTH <user> <pass>).
+ SentinelUsername string
+ // Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+ // configuration, or, if SentinelUsername is also supplied, used for ACL-based
+ // authentication.
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to slave read-only nodes.
+ SlaveOnly bool
+
+ // Use slaves that are disconnected from the master when no connected slaves are available.
+ // Currently, this option only works in the RandomSlaveAddr function.
+ UseDisconnectedSlaves bool
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Username: opt.SentinelUsername,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+ sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+ })
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterSlaveDialer(failover)
+ opt.init()
+
+ connPool := newConnPool(opt)
+
+ failover.mu.Lock()
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+ failover.mu.Unlock()
+
+ c := Client{
+ baseClient: newBaseClient(opt, connPool),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+ c.onClose = failover.Close
+
+ return &c
+}
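
Not part of sentinel.go — a hedged usage sketch for NewFailoverClient. The master name and sentinel addresses below are assumptions, not values from this repository.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Assumed Sentinel deployment: master group "mymaster", three sentinels.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379", "localhost:26380", "localhost:26381"},
	})
	defer rdb.Close()

	// Commands go to whichever node Sentinel currently reports as master;
	// after a failover the connection pool is filtered so stale connections
	// to the old master are dropped.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	fmt.Println(rdb.Get(ctx, "key").Val())
}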
+
+func masterSlaveDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.SlaveOnly {
+ addr, err = failover.RandomSlaveAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+
+ netDialer := &net.Dialer{
+ Timeout: failover.opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if failover.opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+ hooks
+ ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(opt),
+ },
+ ctx: context.Background(),
+ }
+ return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, and without
+// asking for agreement from other Sentinels.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set is used in order to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove is used in order to remove the specified master: the master will no
+// longer be monitored, and will be completely removed from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
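
Not part of sentinel.go — a sketch of talking to Sentinel directly with SentinelClient. The sentinel address and master name are assumptions; the "+switch-master" subscription mirrors what sentinelFailover.listen does internally.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Assumed sentinel address.
	sentinel := redis.NewSentinelClient(&redis.Options{Addr: "localhost:26379"})
	defer sentinel.Close()

	// Ask Sentinel where the current master for the assumed group lives.
	addr, err := sentinel.GetMasterAddrByName(ctx, "mymaster").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("master at", addr[0], addr[1])

	// Block until the first failover notification; the payload has the form
	// "<master-name> <old-ip> <old-port> <new-ip> <new-port>".
	pubsub := sentinel.Subscribe(ctx, "+switch-master")
	defer pubsub.Close()
	msg := <-pubsub.Channel()
	fmt.Println("failover:", msg.Payload)
}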
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+ if c.opt == nil {
+ return "", errors.New("opt is nil")
+ }
+
+ addresses, err := c.slaveAddrs(ctx, false)
+ if err != nil {
+ return "", err
+ }
+
+ if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
+ addresses, err = c.slaveAddrs(ctx, true)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr := c.getMasterAddr(ctx, sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr := c.getMasterAddr(ctx, c.sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, c.sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ var sentinelReachable bool
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+ sentinelReachable = true
+ addrs := parseSlaveAddrs(slaves, useDisconnected)
+ if len(addrs) == 0 {
+ continue
+ }
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ return addrs, nil
+ }
+
+ if sentinelReachable {
+ return []string{}, nil
+ }
+ return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ return ""
+ }
+ return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
+ addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ c.opt.MasterName, err)
+ return []string{}
+ }
+ return parseSlaveAddrs(addrs, false)
+}
+
+func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
+ nodes := make([]string, 0, len(addrs))
+ for _, node := range addrs {
+ ip := ""
+ port := ""
+ flags := []string{}
+ lastkey := ""
+ isDown := false
+
+ for _, key := range node.([]interface{}) {
+ switch lastkey {
+ case "ip":
+ ip = key.(string)
+ case "port":
+ port = key.(string)
+ case "flags":
+ flags = strings.Split(key.(string), ",")
+ }
+ lastkey = key.(string)
+ }
+
+ for _, flag := range flags {
+ switch flag {
+ case "s_down", "o_down":
+ isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
+ }
+ }
+
+ if !isDown {
+ nodes = append(nodes, net.JoinHostPort(ip, port))
+ }
+ }
+
+ return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr //nolint:ifshort
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ var ip, port string
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ switch key {
+ case "ip":
+ ip = vals[i+1].(string)
+ case "port":
+ port = vals[i+1].(string)
+ }
+ }
+ if ip != "" && port != "" {
+ sentinelAddr := net.JoinHostPort(ip, port)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a slave node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ slaveAddrs, err := failover.slaveAddrs(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, slaveAddr := range slaveAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: slaveAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+
+ failover.mu.Lock()
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+ failover.mu.Unlock()
+
+ return c
+}
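
Not part of sentinel.go — a hedged sketch of NewFailoverClusterClient with RouteRandomly, again with assumed addresses. Writes still go to the master, while read-only commands may be served by slave nodes.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Assumed master name and sentinel address.
	rdb := redis.NewFailoverClusterClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379"},
		RouteRandomly: true, // spread read-only commands over master and slaves
	})
	defer rdb.Close()

	// The write is routed to the master; the read may hit a slave.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	fmt.Println(rdb.Get(ctx, "key").Val())
}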
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go
new file mode 100644
index 0000000..753e0fc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go
@@ -0,0 +1,287 @@
+package redis_test
+
+import (
+ "net"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("Sentinel", func() {
+ var client *redis.Client
+ var master *redis.Client
+ var masterPort string
+ var sentinel *redis.SentinelClient
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ MaxRetries: -1,
+ })
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+
+ sentinel = redis.NewSentinelClient(&redis.Options{
+ Addr: ":" + sentinelPort1,
+ MaxRetries: -1,
+ })
+
+ addr, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ master = redis.NewClient(&redis.Options{
+ Addr: net.JoinHostPort(addr[0], addr[1]),
+ MaxRetries: -1,
+ })
+ masterPort = addr[1]
+
+ // Wait until slaves are picked up by sentinel.
+ Eventually(func() string {
+ return sentinel1.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("slaves=2"))
+ Eventually(func() string {
+ return sentinel2.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("slaves=2"))
+ Eventually(func() string {
+ return sentinel3.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("slaves=2"))
+ })
+
+ AfterEach(func() {
+ _ = client.Close()
+ _ = master.Close()
+ _ = sentinel.Close()
+ })
+
+ It("should facilitate failover", func() {
+ // Set value on master.
+ err := client.Set(ctx, "foo", "master", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Verify.
+ val, err := client.Get(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("master"))
+
+ // Verify master->slaves sync.
+ var slavesAddr []string
+ Eventually(func() []string {
+ slavesAddr = redis.GetSlavesAddrByName(ctx, sentinel, sentinelName)
+ return slavesAddr
+ }, "15s", "100ms").Should(HaveLen(2))
+ Eventually(func() bool {
+ sync := true
+ for _, addr := range slavesAddr {
+ slave := redis.NewClient(&redis.Options{
+ Addr: addr,
+ MaxRetries: -1,
+ })
+ sync = slave.Get(ctx, "foo").Val() == "master"
+ _ = slave.Close()
+ }
+ return sync
+ }, "15s", "100ms").Should(BeTrue())
+
+ // Create subscription.
+ pub := client.Subscribe(ctx, "foo")
+ ch := pub.Channel()
+
+ // Kill master.
+ err = master.Shutdown(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ Eventually(func() error {
+ return master.Ping(ctx).Err()
+ }, "15s", "100ms").Should(HaveOccurred())
+
+ // Check that client picked up new master.
+ Eventually(func() string {
+ return client.Get(ctx, "foo").Val()
+ }, "15s", "100ms").Should(Equal("master"))
+
+ // Check if subscription is renewed.
+ var msg *redis.Message
+ Eventually(func() <-chan *redis.Message {
+ _ = client.Publish(ctx, "foo", "hello").Err()
+ return ch
+ }, "15s", "100ms").Should(Receive(&msg))
+ Expect(msg.Channel).To(Equal("foo"))
+ Expect(msg.Payload).To(Equal("hello"))
+ Expect(pub.Close()).NotTo(HaveOccurred())
+
+ _, err = startRedis(masterPort)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("supports DB selection", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ DB: 1,
+ })
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
+
+var _ = Describe("NewFailoverClusterClient", func() {
+ var client *redis.ClusterClient
+ var master *redis.Client
+ var masterPort string
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+
+ RouteRandomly: true,
+ })
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+
+ sentinel := redis.NewSentinelClient(&redis.Options{
+ Addr: ":" + sentinelPort1,
+ MaxRetries: -1,
+ })
+
+ addr, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ master = redis.NewClient(&redis.Options{
+ Addr: net.JoinHostPort(addr[0], addr[1]),
+ MaxRetries: -1,
+ })
+ masterPort = addr[1]
+
+ // Wait until slaves are picked up by sentinel.
+ Eventually(func() string {
+ return sentinel1.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("slaves=2"))
+ Eventually(func() string {
+ return sentinel2.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("slaves=2"))
+ Eventually(func() string {
+ return sentinel3.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("slaves=2"))
+ })
+
+ AfterEach(func() {
+ _ = client.Close()
+ _ = master.Close()
+ })
+
+ It("should facilitate failover", func() {
+ // Set value.
+ err := client.Set(ctx, "foo", "master", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ for i := 0; i < 100; i++ {
+ // Verify.
+ Eventually(func() string {
+ return client.Get(ctx, "foo").Val()
+ }, "15s", "1ms").Should(Equal("master"))
+ }
+
+ // Create subscription.
+ ch := client.Subscribe(ctx, "foo").Channel()
+
+ // Kill master.
+ err = master.Shutdown(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+ Eventually(func() error {
+ return sentinelMaster.Ping(ctx).Err()
+ }, "15s", "100ms").Should(HaveOccurred())
+
+ // Check that client picked up new master.
+ Eventually(func() string {
+ return client.Get(ctx, "foo").Val()
+ }, "15s", "100ms").Should(Equal("master"))
+
+ // Check if subscription is renewed.
+ var msg *redis.Message
+ Eventually(func() <-chan *redis.Message {
+ _ = client.Publish(ctx, "foo", "hello").Err()
+ return ch
+ }, "15s", "100ms").Should(Receive(&msg))
+ Expect(msg.Channel).To(Equal("foo"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ _, err = startRedis(masterPort)
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
+
+var _ = Describe("SentinelAclAuth", func() {
+ const (
+ aclSentinelUsername = "sentinel-user"
+ aclSentinelPassword = "sentinel-pass"
+ )
+
+ var client *redis.Client
+ var sentinel *redis.SentinelClient
+ var sentinels = func() []*redisProcess {
+ return []*redisProcess{sentinel1, sentinel2, sentinel3}
+ }
+
+ BeforeEach(func() {
+ authCmd := redis.NewStatusCmd(ctx, "ACL", "SETUSER", aclSentinelUsername, "ON",
+ ">"+aclSentinelPassword, "-@all", "+auth", "+client|getname", "+client|id", "+client|setname",
+ "+command", "+hello", "+ping", "+role", "+sentinel|get-master-addr-by-name", "+sentinel|master",
+ "+sentinel|myid", "+sentinel|replicas", "+sentinel|sentinels")
+
+ for _, process := range sentinels() {
+ err := process.Client.Process(ctx, authCmd)
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ MaxRetries: -1,
+ SentinelUsername: aclSentinelUsername,
+ SentinelPassword: aclSentinelPassword,
+ })
+
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+
+ sentinel = redis.NewSentinelClient(&redis.Options{
+ Addr: sentinelAddrs[0],
+ MaxRetries: -1,
+ Username: aclSentinelUsername,
+ Password: aclSentinelPassword,
+ })
+
+ _, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Wait until sentinels are picked up by each other.
+ for _, process := range sentinels() {
+ Eventually(func() string {
+ return process.Info(ctx).Val()
+ }, "15s", "100ms").Should(ContainSubstring("sentinels=3"))
+ }
+ })
+
+ AfterEach(func() {
+ unauthCommand := redis.NewStatusCmd(ctx, "ACL", "DELUSER", aclSentinelUsername)
+
+ for _, process := range sentinels() {
+ err := process.Client.Process(ctx, unauthCommand)
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ _ = client.Close()
+ _ = sentinel.Close()
+ })
+
+ It("should still facilitate operations", func() {
+ err := client.Set(ctx, "wow", "acl-auth", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get(ctx, "wow").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("acl-auth"))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf
new file mode 100644
index 0000000..235b295
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf
@@ -0,0 +1,10 @@
+# Minimal redis.conf
+
+port 6379
+daemonize no
+dir .
+save ""
+appendonly yes
+cluster-config-file nodes.conf
+cluster-node-timeout 30000
+maxclients 1001 \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go
new file mode 100644
index 0000000..8c9d872
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go
@@ -0,0 +1,149 @@
+package redis
+
+import (
+ "context"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction has failed.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+//
+// If you don't need WATCH, use Pipeline instead.
+type Tx struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks
+ ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool),
+ },
+ hooks: c.hooks.clone(),
+ ctx: ctx,
+ }
+ tx.init()
+ return &tx
+}
+
+func (c *Tx) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.init()
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and marks the keys to be watched
+// for conditional execution if there are any keys.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx(ctx)
+ defer tx.Close(ctx)
+ if len(keys) > 0 {
+ if err := tx.Watch(ctx, keys...).Err(); err != nil {
+ return err
+ }
+ }
+ return fn(tx)
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+ _ = c.Unwatch(ctx).Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "watch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unwatch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go
new file mode 100644
index 0000000..7deb2df
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go
@@ -0,0 +1,151 @@
+package redis_test
+
+import (
+ "context"
+ "strconv"
+ "sync"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("Tx", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should Watch", func() {
+ var incr func(string) error
+
+ // Transactionally increments key using GET and SET commands.
+ incr = func(key string) error {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ n, err := tx.Get(ctx, key).Int64()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, key, strconv.FormatInt(n+1, 10), 0)
+ return nil
+ })
+ return err
+ }, key)
+ if err == redis.TxFailedErr {
+ return incr(key)
+ }
+ return err
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ err := incr("key")
+ Expect(err).NotTo(HaveOccurred())
+ }()
+ }
+ wg.Wait()
+
+ n, err := client.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(100)))
+ })
+
+ It("should discard", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Set(ctx, "key1", "hello1", 0)
+ pipe.Discard()
+ pipe.Set(ctx, "key2", "hello2", 0)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "key1", "key2")
+ Expect(err).NotTo(HaveOccurred())
+
+ get := client.Get(ctx, "key1")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ get = client.Get(ctx, "key2")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello2"))
+ })
+
+ It("returns no error when there are no commands", func() {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(redis.Pipeliner) error { return nil })
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("PONG"))
+ })
+
+ It("should exec bulks", func() {
+ const N = 20000
+
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ for i := 0; i < N; i++ {
+ pipe.Incr(ctx, "key")
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cmds)).To(Equal(N))
+ for _, cmd := range cmds {
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ num, err := client.Get(ctx, "key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(N)))
+ })
+
+ It("should recover from bad connection", func() {
+ // Put bad connection in the pool.
+ cn, err := client.Pool().Get(context.Background())
+ Expect(err).NotTo(HaveOccurred())
+
+ cn.SetNetConn(&badConn{})
+ client.Pool().Put(ctx, cn)
+
+ do := func() error {
+ err := client.Watch(ctx, func(tx *redis.Tx) error {
+ _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Ping(ctx)
+ return nil
+ })
+ return err
+ })
+ return err
+ }
+
+ err = do()
+ Expect(err).To(MatchError("bad connection"))
+
+ err = do()
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go
new file mode 100644
index 0000000..c89b3e5
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go
@@ -0,0 +1,215 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ SentinelUsername string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+
+ MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelUsername: o.SentinelUsername,
+ SentinelPassword: o.SentinelPassword,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally or having different
+// clients in different environments.
+type UniversalClient interface {
+ Cmdable
+ Context() context.Context
+ AddHook(Hook)
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
+ Close() error
+ PoolStats() *PoolStats
+}
+
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
+//
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.Failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.Cluster())
+ }
+ return NewClient(opts.Simple())
+}
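
Not part of universal.go — a sketch illustrating the three selection rules of NewUniversalClient. All addresses and the master name are assumptions for the example.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// MasterName set -> sentinel-backed FailoverClient.
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		MasterName: "mymaster",
		Addrs:      []string{"localhost:26379"},
	})
	defer failover.Close()

	// Two or more addresses and no MasterName -> ClusterClient.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:7000", "localhost:7001"},
	})
	defer cluster.Close()

	// A single address -> plain single-node Client.
	simple := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})
	defer simple.Close()

	fmt.Println(simple.Ping(ctx).Val())
}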
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go
new file mode 100644
index 0000000..7491a1d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go
@@ -0,0 +1,40 @@
+package redis_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var _ = Describe("UniversalClient", func() {
+ var client redis.UniversalClient
+
+ AfterEach(func() {
+ if client != nil {
+ Expect(client.Close()).To(Succeed())
+ }
+ })
+
+ It("should connect to failover servers", func() {
+ client = redis.NewUniversalClient(&redis.UniversalOptions{
+ MasterName: sentinelName,
+ Addrs: sentinelAddrs,
+ })
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("should connect to simple servers", func() {
+ client = redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: []string{redisAddr},
+ })
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ It("should connect to clusters", func() {
+ client = redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: cluster.addrs(),
+ })
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go
new file mode 100644
index 0000000..112c9a2
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+ return "8.11.5"
+}