From b09c6d56832eb1718c07d74abf3bc6ae3fe4e030 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 28 Apr 2024 14:36:04 +0200 Subject: Adding upstream version 1.1.0. Signed-off-by: Daniel Baumann --- .../go-redis/redis/v8@v8.11.5/.github/FUNDING.yml | 1 + .../.github/ISSUE_TEMPLATE/bug_report.md | 49 + .../v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml | 5 + .../redis/v8@v8.11.5/.github/dependabot.yml | 10 + .../redis/v8@v8.11.5/.github/workflows/build.yml | 36 + .../v8@v8.11.5/.github/workflows/commitlint.yml | 11 + .../v8@v8.11.5/.github/workflows/golangci-lint.yml | 19 + .../redis/v8@v8.11.5/.github/workflows/release.yml | 17 + .../go-redis/redis/v8@v8.11.5/.gitignore | 3 + .../go-redis/redis/v8@v8.11.5/.golangci.yml | 4 + .../go-redis/redis/v8@v8.11.5/.prettierrc.yml | 4 + .../go-redis/redis/v8@v8.11.5/CHANGELOG.md | 177 + .../github.com/go-redis/redis/v8@v8.11.5/LICENSE | 25 + .../github.com/go-redis/redis/v8@v8.11.5/Makefile | 35 + .../github.com/go-redis/redis/v8@v8.11.5/README.md | 175 + .../go-redis/redis/v8@v8.11.5/RELEASING.md | 15 + .../go-redis/redis/v8@v8.11.5/bench_decode_test.go | 309 ++ .../go-redis/redis/v8@v8.11.5/bench_test.go | 412 ++ .../go-redis/redis/v8@v8.11.5/cluster.go | 1750 +++++++ .../go-redis/redis/v8@v8.11.5/cluster_commands.go | 109 + .../go-redis/redis/v8@v8.11.5/cluster_test.go | 1283 +++++ .../go-redis/redis/v8@v8.11.5/command.go | 3478 ++++++++++++ .../go-redis/redis/v8@v8.11.5/command_test.go | 96 + .../go-redis/redis/v8@v8.11.5/commands.go | 3475 ++++++++++++ .../go-redis/redis/v8@v8.11.5/commands_test.go | 5522 ++++++++++++++++++++ .../github.com/go-redis/redis/v8@v8.11.5/doc.go | 4 + .../github.com/go-redis/redis/v8@v8.11.5/error.go | 144 + .../v8@v8.11.5/example_instrumentation_test.go | 80 + .../go-redis/redis/v8@v8.11.5/example_test.go | 634 +++ .../go-redis/redis/v8@v8.11.5/export_test.go | 95 + .../go-redis/redis/v8@v8.11.5/fuzz/fuzz.go | 49 + .../github.com/go-redis/redis/v8@v8.11.5/go.mod | 20 + .../github.com/go-redis/redis/v8@v8.11.5/go.sum | 108 + .../go-redis/redis/v8@v8.11.5/internal/arg.go | 56 + .../redis/v8@v8.11.5/internal/hashtag/hashtag.go | 78 + .../v8@v8.11.5/internal/hashtag/hashtag_test.go | 71 + .../redis/v8@v8.11.5/internal/hscan/hscan.go | 201 + .../redis/v8@v8.11.5/internal/hscan/hscan_test.go | 178 + .../redis/v8@v8.11.5/internal/hscan/structmap.go | 93 + .../go-redis/redis/v8@v8.11.5/internal/internal.go | 29 + .../redis/v8@v8.11.5/internal/internal_test.go | 18 + .../go-redis/redis/v8@v8.11.5/internal/log.go | 26 + .../go-redis/redis/v8@v8.11.5/internal/once.go | 60 + .../redis/v8@v8.11.5/internal/pool/bench_test.go | 97 + .../redis/v8@v8.11.5/internal/pool/conn.go | 121 + .../redis/v8@v8.11.5/internal/pool/export_test.go | 9 + .../redis/v8@v8.11.5/internal/pool/main_test.go | 36 + .../redis/v8@v8.11.5/internal/pool/pool.go | 557 ++ .../redis/v8@v8.11.5/internal/pool/pool_single.go | 58 + .../redis/v8@v8.11.5/internal/pool/pool_sticky.go | 201 + .../redis/v8@v8.11.5/internal/pool/pool_test.go | 458 ++ .../redis/v8@v8.11.5/internal/proto/proto_test.go | 13 + .../redis/v8@v8.11.5/internal/proto/reader.go | 332 ++ .../redis/v8@v8.11.5/internal/proto/reader_test.go | 72 + .../redis/v8@v8.11.5/internal/proto/scan.go | 180 + .../redis/v8@v8.11.5/internal/proto/scan_test.go | 50 + .../redis/v8@v8.11.5/internal/proto/writer.go | 155 + .../redis/v8@v8.11.5/internal/proto/writer_test.go | 93 + .../redis/v8@v8.11.5/internal/rand/rand.go | 50 + .../go-redis/redis/v8@v8.11.5/internal/safe.go | 12 + 
.../go-redis/redis/v8@v8.11.5/internal/unsafe.go | 21 + .../go-redis/redis/v8@v8.11.5/internal/util.go | 46 + .../redis/v8@v8.11.5/internal/util/safe.go | 12 + .../redis/v8@v8.11.5/internal/util/strconv.go | 19 + .../redis/v8@v8.11.5/internal/util/unsafe.go | 23 + .../go-redis/redis/v8@v8.11.5/internal_test.go | 67 + .../go-redis/redis/v8@v8.11.5/iterator.go | 77 + .../go-redis/redis/v8@v8.11.5/iterator_test.go | 136 + .../go-redis/redis/v8@v8.11.5/main_test.go | 448 ++ .../go-redis/redis/v8@v8.11.5/options.go | 429 ++ .../go-redis/redis/v8@v8.11.5/options_test.go | 216 + .../go-redis/redis/v8@v8.11.5/package.json | 8 + .../go-redis/redis/v8@v8.11.5/pipeline.go | 147 + .../go-redis/redis/v8@v8.11.5/pipeline_test.go | 104 + .../go-redis/redis/v8@v8.11.5/pool_test.go | 157 + .../github.com/go-redis/redis/v8@v8.11.5/pubsub.go | 668 +++ .../go-redis/redis/v8@v8.11.5/pubsub_test.go | 495 ++ .../go-redis/redis/v8@v8.11.5/race_test.go | 392 ++ .../github.com/go-redis/redis/v8@v8.11.5/redis.go | 773 +++ .../go-redis/redis/v8@v8.11.5/redis_test.go | 449 ++ .../github.com/go-redis/redis/v8@v8.11.5/result.go | 180 + .../github.com/go-redis/redis/v8@v8.11.5/ring.go | 736 +++ .../go-redis/redis/v8@v8.11.5/ring_test.go | 645 +++ .../github.com/go-redis/redis/v8@v8.11.5/script.go | 65 + .../go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh | 9 + .../go-redis/redis/v8@v8.11.5/scripts/release.sh | 69 + .../go-redis/redis/v8@v8.11.5/scripts/tag.sh | 42 + .../go-redis/redis/v8@v8.11.5/sentinel.go | 796 +++ .../go-redis/redis/v8@v8.11.5/sentinel_test.go | 287 + .../go-redis/redis/v8@v8.11.5/testdata/redis.conf | 10 + .../mod/github.com/go-redis/redis/v8@v8.11.5/tx.go | 149 + .../go-redis/redis/v8@v8.11.5/tx_test.go | 151 + .../go-redis/redis/v8@v8.11.5/universal.go | 215 + .../go-redis/redis/v8@v8.11.5/universal_test.go | 40 + .../go-redis/redis/v8@v8.11.5/version.go | 6 + 95 files changed, 29545 insertions(+) create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md create mode 100644 
dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go create mode 100644 
dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go create mode 100644 
dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go create mode 100644 dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go (limited to 'dependencies/pkg/mod/github.com/go-redis') diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml new file mode 100644 index 0000000..707670d --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml @@ -0,0 +1 @@ +custom: ['https://uptrace.dev/sponsor'] diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..3f934f8 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,49 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' +--- + +Issue tracker is used for reporting bugs and discussing new features. Please use +[stackoverflow](https://stackoverflow.com) for supporting issues. + + + +## Expected Behavior + + + +## Current Behavior + + + +## Possible Solution + + + +## Steps to Reproduce + + + + +1. +2. +3. +4. + +## Context (Environment) + + + + + + +## Detailed Description + + + +## Possible Implementation + + diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..e86d7a6 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: true +contact_links: + - name: Discussions + url: https://github.com/go-redis/redis/discussions + about: Ask a question via GitHub Discussions diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml new file mode 100644 index 0000000..77b7be5 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: +- package-ecosystem: gomod + directory: / + schedule: + interval: weekly +- package-ecosystem: github-actions + directory: / + schedule: + interval: weekly diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml new file mode 100644 index 0000000..a574e2e --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml @@ -0,0 +1,36 @@ +name: Go + +on: + push: + branches: [master] + pull_request: + branches: [master] + +jobs: + build: + name: build + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + go-version: [1.16.x, 1.17.x] + + services: + redis: + image: redis + options: >- + --health-cmd "redis-cli ping" --health-interval 10s 
--health-timeout 5s --health-retries 5 + ports: + - 6379:6379 + + steps: + - name: Set up ${{ matrix.go-version }} + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go-version }} + + - name: Checkout code + uses: actions/checkout@v3 + + - name: Test + run: make test diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml new file mode 100644 index 0000000..5fcfeae --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml @@ -0,0 +1,11 @@ +name: Lint Commit Messages +on: [pull_request] + +jobs: + commitlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - uses: wagoid/commitlint-github-action@v4 diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000..28c16c5 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml @@ -0,0 +1,19 @@ +name: golangci-lint + +on: + push: + tags: + - v* + branches: + - master + - main + pull_request: + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml new file mode 100644 index 0000000..685693a --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml @@ -0,0 +1,17 @@ +name: Releases + +on: + push: + tags: + - 'v*' + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: ncipollo/release-action@v1 + with: + body: + Please refer to + [CHANGELOG.md](https://github.com/go-redis/redis/blob/master/CHANGELOG.md) for details diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore new file mode 100644 index 0000000..b975a7b --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore @@ -0,0 +1,3 @@ +*.rdb +testdata/*/ +.idea/ diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml new file mode 100644 index 0000000..de51455 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml @@ -0,0 +1,4 @@ +run: + concurrency: 8 + deadline: 5m + tests: false diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml new file mode 100644 index 0000000..8b7f044 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md new file mode 100644 index 0000000..195e519 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md @@ -0,0 +1,177 @@ +## 
[8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
+
+
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace to avoid unlikely collisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
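+
+For example, the new expire variants expose the Redis 7 `EXPIRE` options (a quick sketch, not part
+of the upstream notes; it assumes an `rdb` client and `ctx` as in the snippets below):
+
+```go
+// EXPIRE key 10 NX -- set a TTL only when the key has none yet.
+ok, err := rdb.ExpireNX(ctx, "key", 10*time.Second).Result()
+
+// EXPIRE key 60 GT -- only lengthen the TTL, never shorten it.
+ok, err = rdb.ExpireGT(ctx, "key", 60*time.Second).Result()
+```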
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Supports more redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+  single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+  decision:
+
+  - Traces become smaller and less noisy.
+  - It may be costly to process those 3 extra spans for each query.
+  - go-redis no longer depends on OpenTelemetry.
+
+  Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+  Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+  `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
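+
+  For example (a quick sketch, not part of the upstream notes; it assumes the v8 `ChannelOption`
+  helpers and an `rdb` client plus `ctx` as in the snippets below):
+
+```go
+pubsub := rdb.Subscribe(ctx, "mychannel")
+defer pubsub.Close()
+
+ch := pubsub.Channel(
+	redis.WithChannelSize(100),                          // size of the buffered message channel
+	redis.WithChannelHealthCheckInterval(3*time.Second), // ping when no messages arrive in time
+	redis.WithChannelSendTimeout(time.Minute),           // how long to wait on a slow consumer
+)
+for msg := range ch {
+	fmt.Println(msg.Channel, msg.Payload)
+}
+```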
+
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+  you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed the long-awaited ability to scan a Redis Hash into a
+  struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+  [monkey92t](https://github.com/monkey92t) if you are looking for a way to mock the Redis client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define a global package variable
+  `var ctx = context.TODO()` and use it when `ctx` is required.
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTelemetryHook` that adds
+  [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
+  existing keys to a new location or keys will be inaccessible / lost. To use the old hashing scheme:
+
+```go
+import "github.com/golang/groupcache/consistenthash"
+
+ring := redis.NewRing(&redis.RingOptions{
+	NewConsistentHash: func() *consistenthash.Map {
+		return consistenthash.New(100, crc32.ChecksumIEEE)
+	},
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
+## v7.3
+
+- New option `Options.Username` which causes the client to use `AuthACL`. Be aware if your connection
+  URL contains a username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and the old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements the `fmt.Stringer`
+  interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+  transactional pipeline.
+- WrapProcess is replaced with the more convenient AddHook that has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+  detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. The `rdb.Get("foo").Time()` helper is added to
+  parse the time.
+- `SetLimiter` is removed; `Options.Limiter` is added instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## v6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a
+  timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal
+  Redis servers that don't have cluster mode enabled. See
+  https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE
new file mode 100644
index 0000000..298bed9
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
new file mode 100644
index 0000000..a4cfe05
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
@@ -0,0 +1,35 @@
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+	go test ./...
+	go test ./... -short -race
+	go test ./... -run=NONE -bench=. -benchmem
+	env GOOS=linux GOARCH=386 go test ./...
+	go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+	go test ./... -test.run=NONE -test.bench=.
-test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + cd $< && make all + +fmt: + gofmt -w -s ./ + goimports -w -local github.com/go-redis/redis ./ + +go_mod_tidy: + go get -u && go mod tidy + set -e; for dir in $(PACKAGE_DIRS); do \ + echo "go mod tidy in $${dir}"; \ + (cd "$${dir}" && \ + go get -u && \ + go mod tidy); \ + done diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md new file mode 100644 index 0000000..f3b6a01 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md @@ -0,0 +1,175 @@ +# Redis client for Go + +![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) + +go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +Uptrace is an open source and blazingly fast **distributed tracing** backend powered by +OpenTelemetry and ClickHouse. Give it a star as well! + +## Resources + +- [Discussions](https://github.com/go-redis/redis/discussions) +- [Documentation](https://redis.uptrace.dev) +- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) +- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) + +Other projects you may like: + +- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. +- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. + +## Ecosystem + +- [Redis Mock](https://github.com/go-redis/redismock) +- [Distributed Locks](https://github.com/bsm/redislock) +- [Redis Cache](https://github.com/go-redis/cache) +- [Rate limiting](https://github.com/go-redis/redis_rate) + +## Features + +- Redis 3 commands except QUIT, MONITOR, and SYNC. +- Automatic connection pooling with + [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. +- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). +- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). +- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and + [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline). +- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). +- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). +- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). +- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). +- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup) + without using cluster mode and Redis Sentinel. +- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). +- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation). 
+
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a common mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-redis/redis/v8"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+	rdb := redis.NewClient(&redis.Options{
+		Addr:     "localhost:6379",
+		Password: "", // no password set
+		DB:       0,  // use default DB
+	})
+
+	err := rdb.Set(ctx, "key", "value", 0).Err()
+	if err != nil {
+		panic(err)
+	}
+
+	val, err := rdb.Get(ctx, "key").Result()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("key", val)
+
+	val2, err := rdb.Get(ctx, "key2").Result()
+	if err == redis.Nil {
+		fmt.Println("key2 does not exist")
+	} else if err != nil {
+		panic(err)
+	} else {
+		fmt.Println("key2", val2)
+	}
+	// Output: key value
+	// key2 does not exist
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+	Min:    "-inf",
+	Max:    "+inf",
+	Offset: 0,
+	Count:  2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+	Keys:    []string{"zset1", "zset2"},
+	Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the tests
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the redis config file are defined in `main_test.go`:
+
+```
+var (
+	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to your redis-server binary in the expected folder and copy the config file to `testdata/redis/`:
+
+```
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+
+
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run the `release.sh` script, which updates versions in go.mod files and pushes a new branch to
+   GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. 
Merge the pull request and run `tag.sh` to create tags for packages: + +```shell +TAG=v1.0.0 ./scripts/tag.sh +``` diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go new file mode 100644 index 0000000..8382806 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go @@ -0,0 +1,309 @@ +package redis + +import ( + "context" + "fmt" + "io" + "net" + "testing" + "time" + + "github.com/go-redis/redis/v8/internal/proto" +) + +var ctx = context.TODO() + +type ClientStub struct { + Cmdable + resp []byte +} + +func NewClientStub(resp []byte) *ClientStub { + stub := &ClientStub{ + resp: resp, + } + stub.Cmdable = NewClient(&Options{ + PoolSize: 128, + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return stub.stubConn(), nil + }, + }) + return stub +} + +func NewClusterClientStub(resp []byte) *ClientStub { + stub := &ClientStub{ + resp: resp, + } + + client := NewClusterClient(&ClusterOptions{ + PoolSize: 128, + Addrs: []string{"127.0.0.1:6379"}, + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return stub.stubConn(), nil + }, + ClusterSlots: func(_ context.Context) ([]ClusterSlot, error) { + return []ClusterSlot{ + { + Start: 0, + End: 16383, + Nodes: []ClusterNode{{Addr: "127.0.0.1:6379"}}, + }, + }, nil + }, + }) + + // init command. + tmpClient := NewClient(&Options{Addr: ":6379"}) + cmdsInfo, err := tmpClient.Command(ctx).Result() + _ = tmpClient.Close() + client.cmdsInfoCache = newCmdsInfoCache(func(_ context.Context) (map[string]*CommandInfo, error) { + return cmdsInfo, err + }) + + stub.Cmdable = client + return stub +} + +func (c *ClientStub) stubConn() *ConnStub { + return &ConnStub{ + resp: c.resp, + } +} + +type ConnStub struct { + resp []byte + pos int +} + +func (c *ConnStub) Read(b []byte) (n int, err error) { + if len(c.resp) == 0 { + return 0, io.EOF + } + + if c.pos >= len(c.resp) { + c.pos = 0 + } + n = copy(b, c.resp[c.pos:]) + c.pos += n + return n, nil +} + +func (c *ConnStub) Write(b []byte) (n int, err error) { return len(b), nil } +func (c *ConnStub) Close() error { return nil } +func (c *ConnStub) LocalAddr() net.Addr { return nil } +func (c *ConnStub) RemoteAddr() net.Addr { return nil } +func (c *ConnStub) SetDeadline(_ time.Time) error { return nil } +func (c *ConnStub) SetReadDeadline(_ time.Time) error { return nil } +func (c *ConnStub) SetWriteDeadline(_ time.Time) error { return nil } + +type ClientStubFunc func([]byte) *ClientStub + +func BenchmarkDecode(b *testing.B) { + type Benchmark struct { + name string + stub ClientStubFunc + } + + benchmarks := []Benchmark{ + {"single", NewClientStub}, + {"cluster", NewClusterClientStub}, + } + + for _, bench := range benchmarks { + b.Run(fmt.Sprintf("RespError-%s", bench.name), func(b *testing.B) { + respError(b, bench.stub) + }) + b.Run(fmt.Sprintf("RespStatus-%s", bench.name), func(b *testing.B) { + respStatus(b, bench.stub) + }) + b.Run(fmt.Sprintf("RespInt-%s", bench.name), func(b *testing.B) { + respInt(b, bench.stub) + }) + b.Run(fmt.Sprintf("RespString-%s", bench.name), func(b *testing.B) { + respString(b, bench.stub) + }) + b.Run(fmt.Sprintf("RespArray-%s", bench.name), func(b *testing.B) { + respArray(b, bench.stub) + }) + b.Run(fmt.Sprintf("RespPipeline-%s", bench.name), func(b *testing.B) { + respPipeline(b, bench.stub) + }) + b.Run(fmt.Sprintf("RespTxPipeline-%s", bench.name), func(b 
*testing.B) { + respTxPipeline(b, bench.stub) + }) + + // goroutine + b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=5", bench.name), func(b *testing.B) { + dynamicGoroutine(b, bench.stub, 5) + }) + b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=20", bench.name), func(b *testing.B) { + dynamicGoroutine(b, bench.stub, 20) + }) + b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=50", bench.name), func(b *testing.B) { + dynamicGoroutine(b, bench.stub, 50) + }) + b.Run(fmt.Sprintf("DynamicGoroutine-%s-pool=100", bench.name), func(b *testing.B) { + dynamicGoroutine(b, bench.stub, 100) + }) + + b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=5", bench.name), func(b *testing.B) { + staticGoroutine(b, bench.stub, 5) + }) + b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=20", bench.name), func(b *testing.B) { + staticGoroutine(b, bench.stub, 20) + }) + b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=50", bench.name), func(b *testing.B) { + staticGoroutine(b, bench.stub, 50) + }) + b.Run(fmt.Sprintf("StaticGoroutine-%s-pool=100", bench.name), func(b *testing.B) { + staticGoroutine(b, bench.stub, 100) + }) + } +} + +func respError(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte("-ERR test error\r\n")) + respErr := proto.RedisError("ERR test error") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := rdb.Get(ctx, "key").Err(); err != respErr { + b.Fatalf("response error, got %q, want %q", err, respErr) + } + } +} + +func respStatus(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte("+OK\r\n")) + var val string + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if val = rdb.Set(ctx, "key", "value", 0).Val(); val != "OK" { + b.Fatalf("response error, got %q, want OK", val) + } + } +} + +func respInt(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte(":10\r\n")) + var val int64 + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if val = rdb.Incr(ctx, "key").Val(); val != 10 { + b.Fatalf("response error, got %q, want 10", val) + } + } +} + +func respString(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte("$5\r\nhello\r\n")) + var val string + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if val = rdb.Get(ctx, "key").Val(); val != "hello" { + b.Fatalf("response error, got %q, want hello", val) + } + } +} + +func respArray(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte("*3\r\n$5\r\nhello\r\n:10\r\n+OK\r\n")) + var val []interface{} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if val = rdb.MGet(ctx, "key").Val(); len(val) != 3 { + b.Fatalf("response error, got len(%d), want len(3)", len(val)) + } + } +} + +func respPipeline(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte("+OK\r\n$5\r\nhello\r\n:1\r\n")) + var pipe Pipeliner + + b.ResetTimer() + for i := 0; i < b.N; i++ { + pipe = rdb.Pipeline() + set := pipe.Set(ctx, "key", "value", 0) + get := pipe.Get(ctx, "key") + del := pipe.Del(ctx, "key") + _, err := pipe.Exec(ctx) + if err != nil { + b.Fatalf("response error, got %q, want nil", err) + } + if set.Val() != "OK" || get.Val() != "hello" || del.Val() != 1 { + b.Fatal("response error") + } + } +} + +func respTxPipeline(b *testing.B, stub ClientStubFunc) { + rdb := stub([]byte("+OK\r\n+QUEUED\r\n+QUEUED\r\n+QUEUED\r\n*3\r\n+OK\r\n$5\r\nhello\r\n:1\r\n")) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var set *StatusCmd + var get *StringCmd + var del *IntCmd + _, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error { + set = pipe.Set(ctx, "key", "value", 0) + get = pipe.Get(ctx, "key") + del = pipe.Del(ctx, "key") + return nil + }) + if err != nil { + 
b.Fatalf("response error, got %q, want nil", err) + } + if set.Val() != "OK" || get.Val() != "hello" || del.Val() != 1 { + b.Fatal("response error") + } + } +} + +func dynamicGoroutine(b *testing.B, stub ClientStubFunc, concurrency int) { + rdb := stub([]byte("$5\r\nhello\r\n")) + c := make(chan struct{}, concurrency) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + c <- struct{}{} + go func() { + if val := rdb.Get(ctx, "key").Val(); val != "hello" { + panic(fmt.Sprintf("response error, got %q, want hello", val)) + } + <-c + }() + } + // Here no longer wait for all goroutines to complete, it will not affect the test results. + close(c) +} + +func staticGoroutine(b *testing.B, stub ClientStubFunc, concurrency int) { + rdb := stub([]byte("$5\r\nhello\r\n")) + c := make(chan struct{}, concurrency) + + b.ResetTimer() + + for i := 0; i < concurrency; i++ { + go func() { + for { + _, ok := <-c + if !ok { + return + } + if val := rdb.Get(ctx, "key").Val(); val != "hello" { + panic(fmt.Sprintf("response error, got %q, want hello", val)) + } + } + }() + } + for i := 0; i < b.N; i++ { + c <- struct{}{} + } + close(c) +} diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go new file mode 100644 index 0000000..ba81ce8 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go @@ -0,0 +1,412 @@ +package redis_test + +import ( + "bytes" + "context" + "fmt" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/go-redis/redis/v8" +) + +func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client { + client := redis.NewClient(&redis.Options{ + Addr: ":6379", + DialTimeout: time.Second, + ReadTimeout: time.Second, + WriteTimeout: time.Second, + PoolSize: poolSize, + }) + if err := client.FlushDB(ctx).Err(); err != nil { + panic(err) + } + return client +} + +func BenchmarkRedisPing(b *testing.B) { + ctx := context.Background() + rdb := benchmarkRedisClient(ctx, 10) + defer rdb.Close() + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := rdb.Ping(ctx).Err(); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkSetGoroutines(b *testing.B) { + ctx := context.Background() + rdb := benchmarkRedisClient(ctx, 10) + defer rdb.Close() + + for i := 0; i < b.N; i++ { + var wg sync.WaitGroup + + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + err := rdb.Set(ctx, "hello", "world", 0).Err() + if err != nil { + panic(err) + } + }() + } + + wg.Wait() + } +} + +func BenchmarkRedisGetNil(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := client.Get(ctx, "key").Err(); err != redis.Nil { + b.Fatal(err) + } + } + }) +} + +type setStringBenchmark struct { + poolSize int + valueSize int +} + +func (bm setStringBenchmark) String() string { + return fmt.Sprintf("pool=%d value=%d", bm.poolSize, bm.valueSize) +} + +func BenchmarkRedisSetString(b *testing.B) { + benchmarks := []setStringBenchmark{ + {10, 64}, + {10, 1024}, + {10, 64 * 1024}, + {10, 1024 * 1024}, + {10, 10 * 1024 * 1024}, + + {100, 64}, + {100, 1024}, + {100, 64 * 1024}, + {100, 1024 * 1024}, + {100, 10 * 1024 * 1024}, + } + for _, bm := range benchmarks { + b.Run(bm.String(), func(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, bm.poolSize) + defer 
client.Close() + + value := strings.Repeat("1", bm.valueSize) + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Set(ctx, "key", value, 0).Err() + if err != nil { + b.Fatal(err) + } + } + }) + }) + } +} + +func BenchmarkRedisSetGetBytes(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + value := bytes.Repeat([]byte{'1'}, 10000) + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := client.Set(ctx, "key", value, 0).Err(); err != nil { + b.Fatal(err) + } + + got, err := client.Get(ctx, "key").Bytes() + if err != nil { + b.Fatal(err) + } + if !bytes.Equal(got, value) { + b.Fatalf("got != value") + } + } + }) +} + +func BenchmarkRedisMGet(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + if err := client.MSet(ctx, "key1", "hello1", "key2", "hello2").Err(); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := client.MGet(ctx, "key1", "key2").Err(); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkSetExpire(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := client.Set(ctx, "key", "hello", 0).Err(); err != nil { + b.Fatal(err) + } + if err := client.Expire(ctx, "key", time.Second).Err(); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkPipeline(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Set(ctx, "key", "hello", 0) + pipe.Expire(ctx, "key", time.Second) + return nil + }) + if err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkZAdd(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.ZAdd(ctx, "key", &redis.Z{ + Score: float64(1), + Member: "hello", + }).Err() + if err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkXRead(b *testing.B) { + ctx := context.Background() + client := benchmarkRedisClient(ctx, 10) + defer client.Close() + + args := redis.XAddArgs{ + Stream: "1", + ID: "*", + Values: map[string]string{"uno": "dos"}, + } + + lenStreams := 16 + streams := make([]string, 0, lenStreams) + for i := 0; i < lenStreams; i++ { + streams = append(streams, strconv.Itoa(i)) + } + for i := 0; i < lenStreams; i++ { + streams = append(streams, "0") + } + + b.ReportAllocs() + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + client.XAdd(ctx, &args) + + err := client.XRead(ctx, &redis.XReadArgs{ + Streams: streams, + Count: 1, + Block: time.Second, + }).Err() + if err != nil { + b.Fatal(err) + } + } + }) +} + +var clientSink *redis.Client + +func BenchmarkWithContext(b *testing.B) { + ctx := context.Background() + rdb := benchmarkRedisClient(ctx, 10) + defer rdb.Close() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + clientSink = rdb.WithContext(ctx) + } +} + +var ringSink *redis.Ring + +func BenchmarkRingWithContext(b *testing.B) { + ctx := context.Background() + rdb := redis.NewRing(&redis.RingOptions{}) + defer rdb.Close() + + 
b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + ringSink = rdb.WithContext(ctx) + } +} + +//------------------------------------------------------------------------------ + +func newClusterScenario() *clusterScenario { + return &clusterScenario{ + ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"}, + nodeIDs: make([]string, 6), + processes: make(map[string]*redisProcess, 6), + clients: make(map[string]*redis.Client, 6), + } +} + +func BenchmarkClusterPing(b *testing.B) { + if testing.Short() { + b.Skip("skipping in short mode") + } + + ctx := context.Background() + cluster := newClusterScenario() + if err := startCluster(ctx, cluster); err != nil { + b.Fatal(err) + } + defer cluster.Close() + + client := cluster.newClusterClient(ctx, redisClusterOptions()) + defer client.Close() + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Ping(ctx).Err() + if err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkClusterDoInt(b *testing.B) { + if testing.Short() { + b.Skip("skipping in short mode") + } + + ctx := context.Background() + cluster := newClusterScenario() + if err := startCluster(ctx, cluster); err != nil { + b.Fatal(err) + } + defer cluster.Close() + + client := cluster.newClusterClient(ctx, redisClusterOptions()) + defer client.Close() + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Do(ctx, "SET", 10, 10).Err() + if err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkClusterSetString(b *testing.B) { + if testing.Short() { + b.Skip("skipping in short mode") + } + + ctx := context.Background() + cluster := newClusterScenario() + if err := startCluster(ctx, cluster); err != nil { + b.Fatal(err) + } + defer cluster.Close() + + client := cluster.newClusterClient(ctx, redisClusterOptions()) + defer client.Close() + + value := string(bytes.Repeat([]byte{'1'}, 10000)) + + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Set(ctx, "key", value, 0).Err() + if err != nil { + b.Fatal(err) + } + } + }) +} + +var clusterSink *redis.ClusterClient + +func BenchmarkClusterWithContext(b *testing.B) { + ctx := context.Background() + rdb := redis.NewClusterClient(&redis.ClusterOptions{}) + defer rdb.Close() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + clusterSink = rdb.WithContext(ctx) + } +} diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go new file mode 100644 index 0000000..a54f2f3 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go @@ -0,0 +1,1750 @@ +package redis + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "net" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hashtag" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" + "github.com/go-redis/redis/v8/internal/rand" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // NewClient creates a cluster node client with provided name and options. + NewClient func(opt *Options) *Client + + // The maximum number of retries before giving up. 
Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 3 retries.
+	MaxRedirects int
+
+	// Enables read-only commands on slave nodes.
+	ReadOnly bool
+	// Allows routing read-only commands to the closest master or slave node.
+	// It automatically enables ReadOnly.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+	// It automatically enables ReadOnly.
+	RouteRandomly bool
+
+	// Optional function that returns cluster slots information.
+	// It is useful to manually create a cluster of standalone Redis servers
+	// and load-balance read/write operations between masters and slaves.
+	// It can use a service like ZooKeeper to maintain configuration information
+	// and Cluster.ReloadState to manually trigger state reloading.
+	ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+	// The following options are copied from the Options struct.
+
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	// PoolSize applies per cluster node and not for the whole cluster.
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+	if opt.MaxRedirects == -1 {
+		opt.MaxRedirects = 0
+	} else if opt.MaxRedirects == 0 {
+		opt.MaxRedirects = 3
+	}
+
+	if opt.RouteByLatency || opt.RouteRandomly {
+		opt.ReadOnly = true
+	}
+
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+	}
+
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+
+	if opt.MaxRetries == 0 {
+		opt.MaxRetries = -1
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+
+	if opt.NewClient == nil {
+		opt.NewClient = NewClient
+	}
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+	const disableIdleCheck = -1
+
+	return &Options{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: disableIdleCheck,
+
+		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config). 
This means we cannot execute the + // READONLY command against that node -- setting readOnly to false in such + // situations in the options below will prevent that from happening. + readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + failing uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: clOpt.NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const numProbe = 10 + var dur uint64 + + for i := 0; i < numProbe; i++ { + time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) + + start := time.Now() + n.Client.Ping(context.TODO()) + dur += uint64(time.Since(start) / time.Microsecond) + } + + latency := float64(dur) / float64(numProbe) + atomic.StoreUint32(&n.latency, uint32(latency+0.5)) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsFailing() { + atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Failing() bool { + const timeout = 15 // 15 seconds + + failing := atomic.LoadUint32(&n.failing) + if failing == 0 { + return false + } + if time.Now().Unix()-int64(failing) < timeout { + return true + } + atomic.StoreUint32(&n.failing, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + addrs []string + nodes map[string]*clusterNode + activeAddrs []string + closed bool + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + addrs: opt.Addrs, + nodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.nodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.nodes = nil + c.activeAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + + c.mu.RLock() + closed := c.closed //nolint:ifshort + if !closed { + if len(c.activeAddrs) > 0 { + addrs = c.activeAddrs + } else { + addrs = c.addrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
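+// Nodes whose generation is older than the given one are dropped from the
+// registry and their clients closed outside the lock; activeAddrs is rebuilt
+// from the nodes that survive.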
+func (c *clusterNodes) GC(generation uint32) { + //nolint:prealloc + var collected []*clusterNode + + c.mu.Lock() + + c.activeAddrs = c.activeAddrs[:0] + for addr, node := range c.nodes { + if node.Generation() >= generation { + c.activeAddrs = append(c.activeAddrs, addr) + if c.opt.RouteByLatency { + go node.updateLatency() + } + continue + } + + delete(c.nodes, addr) + collected = append(collected, node) + } + + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.nodes[addr] + if ok { + return node, nil + } + + node = newClusterNode(c.opt, addr) + + c.addrs = appendIfNotExists(c.addrs, addr) + c.nodes[addr] = node + + return node, nil +} + +func (c *clusterNodes) get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.nodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.nodes)) + for _, node := range c.nodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if 
nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Failing() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Failing() { + return slave, nil + } + } + + // All slaves are loading - use master. + return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Failing() { + continue + } + if node == nil || n.Latency() < node.Latency() { + node = n + } + } + if node != nil { + return node, nil + } + + // If all nodes are failing - return random node + return c.nodes.Random() +} + +func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + if len(nodes) == 1 { + return nodes[0], nil + } + randomNodes := rand.Perm(len(nodes)) + for _, idx := range randomNodes { + if node := nodes[idx]; !node.Failing() { + return node, nil + } + } + return nodes[randomNodes[0]], nil +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func(ctx context.Context) (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { + state, err := c.load(ctx) + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload(context.Background()) + if err != nil { + return + } + time.Sleep(200 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { + v := c.state.Load() + if v == nil { + return c.Reload(ctx) + } + + state := v.(*clusterState) + if time.Since(state.createdAt) > 10*time.Second { + c.LazyReload() + } + return state, nil +} + +func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { + state, err := c.Reload(ctx) + if err == nil { + return state, nil + } + return c.Get(ctx) +} + 
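+// A minimal usage sketch of the holder; loadFn stands for any
+// func(context.Context) (*clusterState, error) supplied by the caller, and
+// ctx is assumed to be a context.Context:
+//
+//	holder := newClusterStateHolder(loadFn)
+//	state, err := holder.Get(ctx)        // cached state, reloaded lazily after 10s
+//	state, err = holder.ReloadOrGet(ctx) // forced reload, cached state as fallback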
+//------------------------------------------------------------------------------ + +type clusterClient struct { + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder //nolint:structcheck + cmdsInfoCache *cmdsInfoCache //nolint:structcheck +} + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + *clusterClient + cmdable + hooks + ctx context.Context +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + clusterClient: &clusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + }, + ctx: context.Background(), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + c.cmdable = c.Process + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +func (c *ClusterClient) Context() context.Context { + return c.ctx +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +// Options returns read-only Options that were used to create the client. +func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState(ctx context.Context) { + c.state.LazyReload() +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.process) +} + +func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { + cmdInfo := c.cmdInfo(cmd.Name()) + slot := c.cmdSlot(cmd) + + var node *clusterNode + var ask bool + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + if node == nil { + var err error + node, err = c.cmdNode(ctx, cmdInfo, slot) + if err != nil { + return err + } + } + + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(ctx, NewCmd(ctx, "asking")) + _ = pipe.Process(ctx, cmd) + _, lastErr = pipe.Exec(ctx) + _ = pipe.Close() + ask = false + } else { + lastErr = node.Client.Process(ctx, cmd) + } + + // If there is no error - we are done. + if lastErr == nil { + return nil + } + if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node = nil + continue + } + + // If slave is loading - pick another node. 
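+		// The failing mark placed here expires after 15 seconds, so the
+		// replica is retried once it has had time to finish loading.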
+ if c.opt.ReadOnly && isLoadingError(lastErr) { + node.MarkAsFailing() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = isMovedError(lastErr) + if moved || ask { + c.state.LazyReload() + + var err error + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if shouldRetry(lastErr, cmd.readTimeout() == nil) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try another node. + node.MarkAsFailing() + node = nil + continue + } + + return lastErr + } + return lastErr +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachShard concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. 
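+// The stats are summed over the connection pools of all known masters and
+// slaves; an empty value is returned while no cluster state is available.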
+func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get(context.TODO()) + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots(ctx) + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + for _, idx := range rand.Perm(len(addrs)) { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots(ctx).Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + /* + * No node is connectable. It's possible that all nodes' IP has changed. + * Clear activeAddrs to let client be able to re-connect using the initial + * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), + * which might have chance to resolve domain name and get updated IP address. + */ + c.nodes.mu.Lock() + c.nodes.activeAddrs = nil + c.nodes.mu.Unlock() + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
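+// It runs in a dedicated goroutine every idleCheckFrequency and exits once
+// the node registry is closed.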
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c._processPipeline) +} + +func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(ctx, cmdsMap, cmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil + } + + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := state.slotMasterNode(slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) _processPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) + }) + }) + }) +} + +func (c *ClusterClient) pipelineReadCmds( + ctx context.Context, + node *clusterNode, + rd *proto.Reader, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + cmd.SetErr(err) + + if err == nil { + continue + } + + 
if c.checkMovedErr(ctx, cmd, err, failedCmds) { + continue + } + + if c.opt.ReadOnly && isLoadingError(err) { + node.MarkAsFailing() + return err + } + if isRedisError(err) { + continue + } + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := isMovedError(err) + if !moved && !ask { + return false + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + if moved { + c.state.LazyReload() + failedCmds.Add(node, cmd) + return true + } + + if ask { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + return true + } + + panic("not reached") +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) +} + +func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { + // Trim multi .. exec. + cmds = cmds[1 : len(cmds)-1] + + state, err := c.state.Get(ctx) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) _processTxPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. 
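+			// The replies to MULTI and EXEC themselves are consumed by
+			// txPipelineReadQueued; the commands' own replies are read
+			// afterwards by pipelineReadCmds.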
+ cmds = cmds[1 : len(cmds)-1] + + err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) + if err != nil { + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) + } + return err + } + + return pipelineReadCmds(rd, cmds) + }) + }) + }) +} + +func (c *ClusterClient) txPipelineReadQueued( + ctx context.Context, + rd *proto.Reader, + statusCmd *StatusCmd, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { + continue + } + return err + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + return proto.ParseErrorReply(line) + case proto.ArrayReply: + // ok + default: + return fmt.Errorf("redis: expected '*', but got line %q", line) + } + + return nil +} + +func (c *ClusterClient) cmdsMoved( + ctx context.Context, cmds []Cmder, + moved, ask bool, + addr string, + failedCmds *cmdsMap, +) error { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + + if moved { + c.state.LazyReload() + for _, cmd := range cmds { + failedCmds.Add(node, cmd) + } + return nil + } + + if ask { + for _, cmd := range cmds { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + } + return nil + } + + return nil +} + +func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + err = node.Client.Watch(ctx, fn, keys...) 
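+		// fn completed against the current slot master, so stop retrying.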
+ if err == nil { + break + } + + moved, ask, addr := isMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + continue + } + + if shouldRetry(err, true) { + continue + } + + return err + } + + return err +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + var err error + if len(channels) > 0 { + slot := hashtag.Slot(channels[0]) + node, err = c.slotMasterNode(ctx, slot) + } else { + node, err = c.nodes.Random() + } + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn(context.TODO()) + if err != nil { + node = nil + + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // Try 3 random nodes. 
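+	// The reply from the first node that answers COMMAND successfully is
+	// used; if every probe fails, the first collected error is returned.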
+	const nodeLimit = 3
+
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+
+	perm := rand.Perm(len(addrs))
+	if len(perm) > nodeLimit {
+		perm = perm[:nodeLimit]
+	}
+
+	for _, idx := range perm {
+		addr := addrs[idx]
+
+		node, err := c.nodes.GetOrCreate(addr)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		info, err := node.Client.Command(ctx).Result()
+		if err == nil {
+			return info, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	if firstErr == nil {
+		panic("not reached")
+	}
+	return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+	if err != nil {
+		return nil
+	}
+
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+	args := cmd.Args()
+	if args[0] == "cluster" && args[1] == "getkeysinslot" {
+		return args[2].(int)
+	}
+
+	cmdInfo := c.cmdInfo(cmd.Name())
+	return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+	if pos == 0 {
+		return hashtag.RandomSlot()
+	}
+	firstKey := cmd.stringArg(pos)
+	return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+	ctx context.Context,
+	cmdInfo *CommandInfo,
+	slot int,
+) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+		return c.slotReadOnlyNode(state, slot)
+	}
+	return state.slotMasterNode(slot)
+}
+
+func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+	if c.opt.RouteByLatency {
+		return state.slotClosestNode(slot)
+	}
+	if c.opt.RouteRandomly {
+		return state.slotRandomNode(slot)
+	}
+	return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return state.slotMasterNode(slot)
+}
+
+// SlaveForKey gets a client for a replica node that serves the given key, so
+// that any command can be run on it. This is especially useful for running a
+// Lua script that contains only read-only commands on a replica: ordinary
+// read-only commands carry a flag marking them as such and are routed to
+// replica nodes automatically when ClusterOptions.ReadOnly is set to true,
+// but scripts are not.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	slot := hashtag.Slot(key)
+	node, err := c.slotReadOnlyNode(state, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
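+// The key is hashed to its cluster slot and the slot's current master is
+// returned, so the result may change after a failover or a resharding.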
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) { + slot := hashtag.Slot(key) + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +//------------------------------------------------------------------------------ + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { + m.mu.Lock() + m.m[node] = append(m.m[node], cmds...) + m.mu.Unlock() +} diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go new file mode 100644 index 0000000..085bce8 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go @@ -0,0 +1,109 @@ +package redis + +import ( + "context" + "sync" + "sync/atomic" +) + +func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "dbsize") + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var size int64 + err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { + n, err := master.DBSize(ctx).Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = size + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { + cmd := NewStringCmd(ctx, "script", "load", script) + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + mu := &sync.Mutex{} + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptLoad(ctx, script).Result() + if err != nil { + return err + } + + mu.Lock() + if cmd.Val() == "" { + cmd.val = val + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "script", "flush") + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + return shard.ScriptFlush(ctx).Err() + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { + args := make([]interface{}, 2+len(hashes)) + args[0] = "script" + args[1] = "exists" + for i, hash := range hashes { + args[2+i] = hash + } + cmd := NewBoolSliceCmd(ctx, args...) 
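+	// Start optimistic: a hash is reported as existing only if every shard
+	// reports it, so the per-shard results are ANDed together below.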
+ + result := make([]bool, len(hashes)) + for i := range result { + result[i] = true + } + + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + mu := &sync.Mutex{} + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptExists(ctx, hashes...).Result() + if err != nil { + return err + } + + mu.Lock() + for i, v := range val { + result[i] = result[i] && v + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = result + } + return nil + }) + return cmd +} diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go new file mode 100644 index 0000000..6ee7364 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go @@ -0,0 +1,1283 @@ +package redis_test + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "sync" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/go-redis/redis/v8" + "github.com/go-redis/redis/v8/internal/hashtag" +) + +type clusterScenario struct { + ports []string + nodeIDs []string + processes map[string]*redisProcess + clients map[string]*redis.Client +} + +func (s *clusterScenario) masters() []*redis.Client { + result := make([]*redis.Client, 3) + for pos, port := range s.ports[:3] { + result[pos] = s.clients[port] + } + return result +} + +func (s *clusterScenario) slaves() []*redis.Client { + result := make([]*redis.Client, 3) + for pos, port := range s.ports[3:] { + result[pos] = s.clients[port] + } + return result +} + +func (s *clusterScenario) addrs() []string { + addrs := make([]string, len(s.ports)) + for i, port := range s.ports { + addrs[i] = net.JoinHostPort("127.0.0.1", port) + } + return addrs +} + +func (s *clusterScenario) newClusterClientUnstable(opt *redis.ClusterOptions) *redis.ClusterClient { + opt.Addrs = s.addrs() + return redis.NewClusterClient(opt) +} + +func (s *clusterScenario) newClusterClient( + ctx context.Context, opt *redis.ClusterOptions, +) *redis.ClusterClient { + client := s.newClusterClientUnstable(opt) + + err := eventually(func() error { + if opt.ClusterSlots != nil { + return nil + } + + state, err := client.LoadState(ctx) + if err != nil { + return err + } + + if !state.IsConsistent(ctx) { + return fmt.Errorf("cluster state is not consistent") + } + + return nil + }, 30*time.Second) + if err != nil { + panic(err) + } + + return client +} + +func (s *clusterScenario) Close() error { + for _, port := range s.ports { + processes[port].Close() + delete(processes, port) + } + return nil +} + +func startCluster(ctx context.Context, scenario *clusterScenario) error { + // Start processes and collect node ids + for pos, port := range scenario.ports { + process, err := startRedis(port, "--cluster-enabled", "yes") + if err != nil { + return err + } + + client := redis.NewClient(&redis.Options{ + Addr: ":" + port, + }) + + info, err := client.ClusterNodes(ctx).Result() + if err != nil { + return err + } + + scenario.processes[port] = process + scenario.clients[port] = client + scenario.nodeIDs[pos] = info[:40] + } + + // Meet cluster nodes. + for _, client := range scenario.clients { + err := client.ClusterMeet(ctx, "127.0.0.1", scenario.ports[0]).Err() + if err != nil { + return err + } + } + + // Bootstrap masters. 
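+	// Split the 16384 hash slots into three contiguous ranges, one per
+	// master: 0-4999, 5000-9999 and 10000-16383.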
+ slots := []int{0, 5000, 10000, 16384} + for pos, master := range scenario.masters() { + err := master.ClusterAddSlotsRange(ctx, slots[pos], slots[pos+1]-1).Err() + if err != nil { + return err + } + } + + // Bootstrap slaves. + for idx, slave := range scenario.slaves() { + masterID := scenario.nodeIDs[idx] + + // Wait until master is available + err := eventually(func() error { + s := slave.ClusterNodes(ctx).Val() + wanted := masterID + if !strings.Contains(s, wanted) { + return fmt.Errorf("%q does not contain %q", s, wanted) + } + return nil + }, 10*time.Second) + if err != nil { + return err + } + + err = slave.ClusterReplicate(ctx, masterID).Err() + if err != nil { + return err + } + } + + // Wait until all nodes have consistent info. + wanted := []redis.ClusterSlot{{ + Start: 0, + End: 4999, + Nodes: []redis.ClusterNode{{ + ID: "", + Addr: "127.0.0.1:8220", + }, { + ID: "", + Addr: "127.0.0.1:8223", + }}, + }, { + Start: 5000, + End: 9999, + Nodes: []redis.ClusterNode{{ + ID: "", + Addr: "127.0.0.1:8221", + }, { + ID: "", + Addr: "127.0.0.1:8224", + }}, + }, { + Start: 10000, + End: 16383, + Nodes: []redis.ClusterNode{{ + ID: "", + Addr: "127.0.0.1:8222", + }, { + ID: "", + Addr: "127.0.0.1:8225", + }}, + }} + for _, client := range scenario.clients { + err := eventually(func() error { + res, err := client.ClusterSlots(ctx).Result() + if err != nil { + return err + } + return assertSlotsEqual(res, wanted) + }, 30*time.Second) + if err != nil { + return err + } + } + + return nil +} + +func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error { +outerLoop: + for _, s2 := range wanted { + for _, s1 := range slots { + if slotEqual(s1, s2) { + continue outerLoop + } + } + return fmt.Errorf("%v not found in %v", s2, slots) + } + return nil +} + +func slotEqual(s1, s2 redis.ClusterSlot) bool { + if s1.Start != s2.Start { + return false + } + if s1.End != s2.End { + return false + } + if len(s1.Nodes) != len(s2.Nodes) { + return false + } + for i, n1 := range s1.Nodes { + if n1.Addr != s2.Nodes[i].Addr { + return false + } + } + return true +} + +//------------------------------------------------------------------------------ + +var _ = Describe("ClusterClient", func() { + var failover bool + var opt *redis.ClusterOptions + var client *redis.ClusterClient + + assertClusterClient := func() { + It("supports WithContext", func() { + ctx, cancel := context.WithCancel(ctx) + cancel() + + err := client.Ping(ctx).Err() + Expect(err).To(MatchError("context canceled")) + }) + + It("should GET/SET/DEL", func() { + err := client.Get(ctx, "A").Err() + Expect(err).To(Equal(redis.Nil)) + + err = client.Set(ctx, "A", "VALUE", 0).Err() + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() string { + return client.Get(ctx, "A").Val() + }, 30*time.Second).Should(Equal("VALUE")) + + cnt, err := client.Del(ctx, "A").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(cnt).To(Equal(int64(1))) + }) + + It("GET follows redirects", func() { + err := client.Set(ctx, "A", "VALUE", 0).Err() + Expect(err).NotTo(HaveOccurred()) + + if !failover { + Eventually(func() int64 { + nodes, err := client.Nodes(ctx, "A") + if err != nil { + return 0 + } + return nodes[1].Client.DBSize(ctx).Val() + }, 30*time.Second).Should(Equal(int64(1))) + + Eventually(func() error { + return client.SwapNodes(ctx, "A") + }, 30*time.Second).ShouldNot(HaveOccurred()) + } + + v, err := client.Get(ctx, "A").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(v).To(Equal("VALUE")) + }) + + It("SET follows redirects", func() { + if 
!failover { + Eventually(func() error { + return client.SwapNodes(ctx, "A") + }, 30*time.Second).ShouldNot(HaveOccurred()) + } + + err := client.Set(ctx, "A", "VALUE", 0).Err() + Expect(err).NotTo(HaveOccurred()) + + v, err := client.Get(ctx, "A").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(v).To(Equal("VALUE")) + }) + + It("distributes keys", func() { + for i := 0; i < 100; i++ { + err := client.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err() + Expect(err).NotTo(HaveOccurred()) + } + + client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + defer GinkgoRecover() + Eventually(func() string { + return master.Info(ctx, "keyspace").Val() + }, 30*time.Second).Should(Or( + ContainSubstring("keys=31"), + ContainSubstring("keys=29"), + ContainSubstring("keys=40"), + )) + return nil + }) + }) + + It("distributes keys when using EVAL", func() { + script := redis.NewScript(` + local r = redis.call('SET', KEYS[1], ARGV[1]) + return r + `) + + var key string + for i := 0; i < 100; i++ { + key = fmt.Sprintf("key%d", i) + err := script.Run(ctx, client, []string{key}, "value").Err() + Expect(err).NotTo(HaveOccurred()) + } + + client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + defer GinkgoRecover() + Eventually(func() string { + return master.Info(ctx, "keyspace").Val() + }, 30*time.Second).Should(Or( + ContainSubstring("keys=31"), + ContainSubstring("keys=29"), + ContainSubstring("keys=40"), + )) + return nil + }) + }) + + It("distributes scripts when using Script Load", func() { + client.ScriptFlush(ctx) + + script := redis.NewScript(`return 'Unique script'`) + + script.Load(ctx, client) + + client.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error { + defer GinkgoRecover() + + val, _ := script.Exists(ctx, shard).Result() + Expect(val[0]).To(Equal(true)) + return nil + }) + }) + + It("checks all shards when using Script Exists", func() { + client.ScriptFlush(ctx) + + script := redis.NewScript(`return 'First script'`) + lostScriptSrc := `return 'Lost script'` + lostScript := redis.NewScript(lostScriptSrc) + + script.Load(ctx, client) + client.Do(ctx, "script", "load", lostScriptSrc) + + val, _ := client.ScriptExists(ctx, script.Hash(), lostScript.Hash()).Result() + + Expect(val).To(Equal([]bool{true, false})) + }) + + It("flushes scripts from all shards when using ScriptFlush", func() { + script := redis.NewScript(`return 'Unnecessary script'`) + script.Load(ctx, client) + + val, _ := client.ScriptExists(ctx, script.Hash()).Result() + Expect(val).To(Equal([]bool{true})) + + client.ScriptFlush(ctx) + + val, _ = client.ScriptExists(ctx, script.Hash()).Result() + Expect(val).To(Equal([]bool{false})) + }) + + It("supports Watch", func() { + var incr func(string) error + + // Transactionally increments key using GET and SET commands. 
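+			// On redis.TxFailedErr (the watched key changed between GET and
+			// EXEC) the closure calls itself to retry.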
+ incr = func(key string) error { + err := client.Watch(ctx, func(tx *redis.Tx) error { + n, err := tx.Get(ctx, key).Int64() + if err != nil && err != redis.Nil { + return err + } + + _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Set(ctx, key, strconv.FormatInt(n+1, 10), 0) + return nil + }) + return err + }, key) + if err == redis.TxFailedErr { + return incr(key) + } + return err + } + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + + err := incr("key") + Expect(err).NotTo(HaveOccurred()) + }() + } + wg.Wait() + + Eventually(func() string { + return client.Get(ctx, "key").Val() + }, 30*time.Second).Should(Equal("100")) + }) + + Describe("pipelining", func() { + var pipe *redis.Pipeline + + assertPipeline := func() { + keys := []string{"A", "B", "C", "D", "E", "F", "G"} + + It("follows redirects", func() { + if !failover { + for _, key := range keys { + Eventually(func() error { + return client.SwapNodes(ctx, key) + }, 30*time.Second).ShouldNot(HaveOccurred()) + } + } + + for i, key := range keys { + pipe.Set(ctx, key, key+"_value", 0) + pipe.Expire(ctx, key, time.Duration(i+1)*time.Hour) + } + cmds, err := pipe.Exec(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(cmds).To(HaveLen(14)) + + _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + defer GinkgoRecover() + Eventually(func() int64 { + return node.DBSize(ctx).Val() + }, 30*time.Second).ShouldNot(BeZero()) + return nil + }) + + if !failover { + for _, key := range keys { + Eventually(func() error { + return client.SwapNodes(ctx, key) + }, 30*time.Second).ShouldNot(HaveOccurred()) + } + } + + for _, key := range keys { + pipe.Get(ctx, key) + pipe.TTL(ctx, key) + } + cmds, err = pipe.Exec(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(cmds).To(HaveLen(14)) + + for i, key := range keys { + get := cmds[i*2].(*redis.StringCmd) + Expect(get.Val()).To(Equal(key + "_value")) + + ttl := cmds[(i*2)+1].(*redis.DurationCmd) + dur := time.Duration(i+1) * time.Hour + Expect(ttl.Val()).To(BeNumerically("~", dur, 30*time.Second)) + } + }) + + It("works with missing keys", func() { + pipe.Set(ctx, "A", "A_value", 0) + pipe.Set(ctx, "C", "C_value", 0) + _, err := pipe.Exec(ctx) + Expect(err).NotTo(HaveOccurred()) + + a := pipe.Get(ctx, "A") + b := pipe.Get(ctx, "B") + c := pipe.Get(ctx, "C") + cmds, err := pipe.Exec(ctx) + Expect(err).To(Equal(redis.Nil)) + Expect(cmds).To(HaveLen(3)) + + Expect(a.Err()).NotTo(HaveOccurred()) + Expect(a.Val()).To(Equal("A_value")) + + Expect(b.Err()).To(Equal(redis.Nil)) + Expect(b.Val()).To(Equal("")) + + Expect(c.Err()).NotTo(HaveOccurred()) + Expect(c.Val()).To(Equal("C_value")) + }) + } + + Describe("with Pipeline", func() { + BeforeEach(func() { + pipe = client.Pipeline().(*redis.Pipeline) + }) + + AfterEach(func() { + Expect(pipe.Close()).NotTo(HaveOccurred()) + }) + + assertPipeline() + }) + + Describe("with TxPipeline", func() { + BeforeEach(func() { + pipe = client.TxPipeline().(*redis.Pipeline) + }) + + AfterEach(func() { + Expect(pipe.Close()).NotTo(HaveOccurred()) + }) + + assertPipeline() + }) + }) + + It("supports PubSub", func() { + pubsub := client.Subscribe(ctx, "mychannel") + defer pubsub.Close() + + Eventually(func() error { + _, err := client.Publish(ctx, "mychannel", "hello").Result() + if err != nil { + return err + } + + msg, err := pubsub.ReceiveTimeout(ctx, time.Second) + if err != nil { + return err + } + + _, ok := msg.(*redis.Message) + if !ok { + return 
fmt.Errorf("got %T, wanted *redis.Message", msg) + } + + return nil + }, 30*time.Second).ShouldNot(HaveOccurred()) + }) + + It("supports PubSub.Ping without channels", func() { + pubsub := client.Subscribe(ctx) + defer pubsub.Close() + + err := pubsub.Ping(ctx) + Expect(err).NotTo(HaveOccurred()) + }) + } + + Describe("ClusterClient", func() { + BeforeEach(func() { + opt = redisClusterOptions() + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("returns pool stats", func() { + stats := client.PoolStats() + Expect(stats).To(BeAssignableToTypeOf(&redis.PoolStats{})) + }) + + It("returns an error when there are no attempts left", func() { + opt := redisClusterOptions() + opt.MaxRedirects = -1 + client := cluster.newClusterClient(ctx, opt) + + Eventually(func() error { + return client.SwapNodes(ctx, "A") + }, 30*time.Second).ShouldNot(HaveOccurred()) + + err := client.Get(ctx, "A").Err() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("MOVED")) + + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("calls fn for every master node", func() { + for i := 0; i < 10; i++ { + Expect(client.Set(ctx, strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred()) + } + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + size, err := client.DBSize(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(size).To(Equal(int64(0))) + }) + + It("should CLUSTER SLOTS", func() { + res, err := client.ClusterSlots(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(HaveLen(3)) + + wanted := []redis.ClusterSlot{{ + Start: 0, + End: 4999, + Nodes: []redis.ClusterNode{{ + ID: "", + Addr: "127.0.0.1:8220", + }, { + ID: "", + Addr: "127.0.0.1:8223", + }}, + }, { + Start: 5000, + End: 9999, + Nodes: []redis.ClusterNode{{ + ID: "", + Addr: "127.0.0.1:8221", + }, { + ID: "", + Addr: "127.0.0.1:8224", + }}, + }, { + Start: 10000, + End: 16383, + Nodes: []redis.ClusterNode{{ + ID: "", + Addr: "127.0.0.1:8222", + }, { + ID: "", + Addr: "127.0.0.1:8225", + }}, + }} + Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred()) + }) + + It("should CLUSTER NODES", func() { + res, err := client.ClusterNodes(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(res)).To(BeNumerically(">", 400)) + }) + + It("should CLUSTER INFO", func() { + res, err := client.ClusterInfo(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(ContainSubstring("cluster_known_nodes:6")) + }) + + It("should CLUSTER KEYSLOT", func() { + hashSlot, err := client.ClusterKeySlot(ctx, "somekey").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey")))) + }) + + It("should CLUSTER GETKEYSINSLOT", func() { + keys, err := client.ClusterGetKeysInSlot(ctx, hashtag.Slot("somekey"), 1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(keys)).To(Equal(0)) + }) + + It("should CLUSTER COUNT-FAILURE-REPORTS", func() { + n, err := client.ClusterCountFailureReports(ctx, cluster.nodeIDs[0]).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(n).To(Equal(int64(0))) 
+ }) + + It("should CLUSTER COUNTKEYSINSLOT", func() { + n, err := client.ClusterCountKeysInSlot(ctx, 10).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(n).To(Equal(int64(0))) + }) + + It("should CLUSTER SAVECONFIG", func() { + res, err := client.ClusterSaveConfig(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal("OK")) + }) + + It("should CLUSTER SLAVES", func() { + nodesList, err := client.ClusterSlaves(ctx, cluster.nodeIDs[0]).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(nodesList).Should(ContainElement(ContainSubstring("slave"))) + Expect(nodesList).Should(HaveLen(1)) + }) + + It("should RANDOMKEY", func() { + const nkeys = 100 + + for i := 0; i < nkeys; i++ { + err := client.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err() + Expect(err).NotTo(HaveOccurred()) + } + + var keys []string + addKey := func(key string) { + for _, k := range keys { + if k == key { + return + } + } + keys = append(keys, key) + } + + for i := 0; i < nkeys*10; i++ { + key := client.RandomKey(ctx).Val() + addKey(key) + } + + Expect(len(keys)).To(BeNumerically("~", nkeys, nkeys/10)) + }) + + It("supports Process hook", func() { + err := client.Ping(ctx).Err() + Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + return node.Ping(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + var stack []string + + clusterHook := &hook{ + beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) { + Expect(cmd.String()).To(Equal("ping: ")) + stack = append(stack, "cluster.BeforeProcess") + return ctx, nil + }, + afterProcess: func(ctx context.Context, cmd redis.Cmder) error { + Expect(cmd.String()).To(Equal("ping: PONG")) + stack = append(stack, "cluster.AfterProcess") + return nil + }, + } + client.AddHook(clusterHook) + + nodeHook := &hook{ + beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) { + Expect(cmd.String()).To(Equal("ping: ")) + stack = append(stack, "shard.BeforeProcess") + return ctx, nil + }, + afterProcess: func(ctx context.Context, cmd redis.Cmder) error { + Expect(cmd.String()).To(Equal("ping: PONG")) + stack = append(stack, "shard.AfterProcess") + return nil + }, + } + + _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + node.AddHook(nodeHook) + return nil + }) + + err = client.Ping(ctx).Err() + Expect(err).NotTo(HaveOccurred()) + Expect(stack).To(Equal([]string{ + "cluster.BeforeProcess", + "shard.BeforeProcess", + "shard.AfterProcess", + "cluster.AfterProcess", + })) + + clusterHook.beforeProcess = nil + clusterHook.afterProcess = nil + nodeHook.beforeProcess = nil + nodeHook.afterProcess = nil + }) + + It("supports Pipeline hook", func() { + err := client.Ping(ctx).Err() + Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + return node.Ping(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + var stack []string + + client.AddHook(&hook{ + beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: ")) + stack = append(stack, "cluster.BeforeProcessPipeline") + return ctx, nil + }, + afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: PONG")) + stack = append(stack, "cluster.AfterProcessPipeline") + return nil + }, + }) + + 
_ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + node.AddHook(&hook{ + beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: ")) + stack = append(stack, "shard.BeforeProcessPipeline") + return ctx, nil + }, + afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: PONG")) + stack = append(stack, "shard.AfterProcessPipeline") + return nil + }, + }) + return nil + }) + + _, err = client.Pipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Ping(ctx) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + Expect(stack).To(Equal([]string{ + "cluster.BeforeProcessPipeline", + "shard.BeforeProcessPipeline", + "shard.AfterProcessPipeline", + "cluster.AfterProcessPipeline", + })) + }) + + It("supports TxPipeline hook", func() { + err := client.Ping(ctx).Err() + Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + return node.Ping(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + var stack []string + + client.AddHook(&hook{ + beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: ")) + stack = append(stack, "cluster.BeforeProcessPipeline") + return ctx, nil + }, + afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: PONG")) + stack = append(stack, "cluster.AfterProcessPipeline") + return nil + }, + }) + + _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { + node.AddHook(&hook{ + beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: ")) + stack = append(stack, "shard.BeforeProcessPipeline") + return ctx, nil + }, + afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: PONG")) + stack = append(stack, "shard.AfterProcessPipeline") + return nil + }, + }) + return nil + }) + + _, err = client.TxPipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Ping(ctx) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + Expect(stack).To(Equal([]string{ + "cluster.BeforeProcessPipeline", + "shard.BeforeProcessPipeline", + "shard.AfterProcessPipeline", + "cluster.AfterProcessPipeline", + })) + }) + + It("should return correct replica for key", func() { + client, err := client.SlaveForKey(ctx, "test") + Expect(err).ToNot(HaveOccurred()) + info := client.Info(ctx, "server") + Expect(info.Val()).Should(ContainSubstring("tcp_port:8224")) + }) + + It("should return correct master for key", func() { + client, err := client.MasterForKey(ctx, "test") + Expect(err).ToNot(HaveOccurred()) + info := client.Info(ctx, "server") + Expect(info.Val()).Should(ContainSubstring("tcp_port:8221")) + }) + + assertClusterClient() + }) + + Describe("ClusterClient with RouteByLatency", func() { + BeforeEach(func() { + opt = redisClusterOptions() + opt.RouteByLatency = true + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + 
Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { + Eventually(func() int64 { + return client.DBSize(ctx).Val() + }, 30*time.Second).Should(Equal(int64(0))) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + err := client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { + return slave.ReadWrite(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + err = client.Close() + Expect(err).NotTo(HaveOccurred()) + }) + + assertClusterClient() + }) + + Describe("ClusterClient with ClusterSlots", func() { + BeforeEach(func() { + failover = true + + opt = redisClusterOptions() + opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) { + slots := []redis.ClusterSlot{{ + Start: 0, + End: 4999, + Nodes: []redis.ClusterNode{{ + Addr: ":" + ringShard1Port, + }}, + }, { + Start: 5000, + End: 9999, + Nodes: []redis.ClusterNode{{ + Addr: ":" + ringShard2Port, + }}, + }, { + Start: 10000, + End: 16383, + Nodes: []redis.ClusterNode{{ + Addr: ":" + ringShard3Port, + }}, + }} + return slots, nil + } + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { + Eventually(func() int64 { + return client.DBSize(ctx).Val() + }, 30*time.Second).Should(Equal(int64(0))) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + failover = false + + err := client.Close() + Expect(err).NotTo(HaveOccurred()) + }) + + assertClusterClient() + }) + + Describe("ClusterClient with RouteRandomly and ClusterSlots", func() { + BeforeEach(func() { + failover = true + + opt = redisClusterOptions() + opt.RouteRandomly = true + opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) { + slots := []redis.ClusterSlot{{ + Start: 0, + End: 4999, + Nodes: []redis.ClusterNode{{ + Addr: ":" + ringShard1Port, + }}, + }, { + Start: 5000, + End: 9999, + Nodes: []redis.ClusterNode{{ + Addr: ":" + ringShard2Port, + }}, + }, { + Start: 10000, + End: 16383, + Nodes: []redis.ClusterNode{{ + Addr: ":" + ringShard3Port, + }}, + }} + return slots, nil + } + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { + Eventually(func() int64 { + return client.DBSize(ctx).Val() + }, 30*time.Second).Should(Equal(int64(0))) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + failover = false + + err := client.Close() + Expect(err).NotTo(HaveOccurred()) + }) + + assertClusterClient() + }) + + Describe("ClusterClient with ClusterSlots with multiple nodes per slot", func() { + BeforeEach(func() { + failover = true + + opt = redisClusterOptions() + opt.ReadOnly = true + opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) { + slots := []redis.ClusterSlot{{ + Start: 0, + End: 4999, + Nodes: []redis.ClusterNode{{ + Addr: ":8220", + }, { + Addr: ":8223", + }}, + }, { + Start: 5000, + End: 9999, + Nodes: []redis.ClusterNode{{ + Addr: ":8221", + }, { + Addr: ":8224", + }}, + }, { + Start: 10000, + End: 16383, + Nodes: []redis.ClusterNode{{ + 
Addr: ":8222", + }, { + Addr: ":8225", + }}, + }} + return slots, nil + } + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { + Eventually(func() int64 { + return client.DBSize(ctx).Val() + }, 30*time.Second).Should(Equal(int64(0))) + return nil + }) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + failover = false + + err := client.Close() + Expect(err).NotTo(HaveOccurred()) + }) + + assertClusterClient() + }) +}) + +var _ = Describe("ClusterClient without nodes", func() { + var client *redis.ClusterClient + + BeforeEach(func() { + client = redis.NewClusterClient(&redis.ClusterOptions{}) + }) + + AfterEach(func() { + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("Ping returns an error", func() { + err := client.Ping(ctx).Err() + Expect(err).To(MatchError("redis: cluster has no nodes")) + }) + + It("pipeline returns an error", func() { + _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Ping(ctx) + return nil + }) + Expect(err).To(MatchError("redis: cluster has no nodes")) + }) +}) + +var _ = Describe("ClusterClient without valid nodes", func() { + var client *redis.ClusterClient + + BeforeEach(func() { + client = redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: []string{redisAddr}, + }) + }) + + AfterEach(func() { + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("returns an error", func() { + err := client.Ping(ctx).Err() + Expect(err).To(MatchError("ERR This instance has cluster support disabled")) + }) + + It("pipeline returns an error", func() { + _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Ping(ctx) + return nil + }) + Expect(err).To(MatchError("ERR This instance has cluster support disabled")) + }) +}) + +var _ = Describe("ClusterClient with unavailable Cluster", func() { + var client *redis.ClusterClient + + BeforeEach(func() { + for _, node := range cluster.clients { + err := node.ClientPause(ctx, 5*time.Second).Err() + Expect(err).NotTo(HaveOccurred()) + } + + opt := redisClusterOptions() + opt.ReadTimeout = 250 * time.Millisecond + opt.WriteTimeout = 250 * time.Millisecond + opt.MaxRedirects = 1 + client = cluster.newClusterClientUnstable(opt) + }) + + AfterEach(func() { + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("recovers when Cluster recovers", func() { + err := client.Ping(ctx).Err() + Expect(err).To(HaveOccurred()) + + Eventually(func() error { + return client.Ping(ctx).Err() + }, "30s").ShouldNot(HaveOccurred()) + }) +}) + +var _ = Describe("ClusterClient timeout", func() { + var client *redis.ClusterClient + + AfterEach(func() { + _ = client.Close() + }) + + testTimeout := func() { + It("Ping timeouts", func() { + err := client.Ping(ctx).Err() + Expect(err).To(HaveOccurred()) + Expect(err.(net.Error).Timeout()).To(BeTrue()) + }) + + It("Pipeline timeouts", func() { + _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Ping(ctx) + return nil + }) + Expect(err).To(HaveOccurred()) + Expect(err.(net.Error).Timeout()).To(BeTrue()) + }) + + It("Tx timeouts", func() { + err := client.Watch(ctx, func(tx *redis.Tx) error { + return tx.Ping(ctx).Err() + }, "foo") + Expect(err).To(HaveOccurred()) + Expect(err.(net.Error).Timeout()).To(BeTrue()) + }) + + It("Tx Pipeline timeouts", func() { + err := 
client.Watch(ctx, func(tx *redis.Tx) error { + _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { + pipe.Ping(ctx) + return nil + }) + return err + }, "foo") + Expect(err).To(HaveOccurred()) + Expect(err.(net.Error).Timeout()).To(BeTrue()) + }) + } + + const pause = 5 * time.Second + + Context("read/write timeout", func() { + BeforeEach(func() { + opt := redisClusterOptions() + opt.ReadTimeout = 250 * time.Millisecond + opt.WriteTimeout = 250 * time.Millisecond + opt.MaxRedirects = 1 + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error { + return client.ClientPause(ctx, pause).Err() + }) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error { + defer GinkgoRecover() + Eventually(func() error { + return client.Ping(ctx).Err() + }, 2*pause).ShouldNot(HaveOccurred()) + return nil + }) + }) + + testTimeout() + }) +}) diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go new file mode 100644 index 0000000..4bb12a8 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go @@ -0,0 +1,3478 @@ +package redis + +import ( + "context" + "fmt" + "net" + "strconv" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hscan" + "github.com/go-redis/redis/v8/internal/proto" + "github.com/go-redis/redis/v8/internal/util" +) + +type Cmder interface { + Name() string + FullName() string + Args() []interface{} + String() string + stringArg(int) string + firstKeyPos() int8 + SetFirstKeyPos(int8) + + readTimeout() *time.Duration + readReply(rd *proto.Reader) error + + SetErr(error) + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.SetErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmds(wr *proto.Writer, cmds []Cmder) error { + for _, cmd := range cmds { + if err := writeCmd(wr, cmd); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmd Cmder) error { + return wr.WriteArgs(cmd.Args()) +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + if pos := cmd.firstKeyPos(); pos != 0 { + return int(pos) + } + + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + case "memory": + // https://github.com/redis/redis/issues/7493 + if cmd.stringArg(1) == "usage" { + return 2 + } + } + + if info != nil { + return int(info.FirstKeyPos) + } + return 0 +} + +func cmdString(cmd Cmder, val interface{}) string { + b := make([]byte, 0, 64) + + for i, arg := range cmd.Args() { + if i > 0 { + b = append(b, ' ') + } + b = internal.AppendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } else if val != nil { + b = append(b, ": "...) 
+ b = internal.AppendArg(b, val) + } + + return internal.String(b) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + ctx context.Context + args []interface{} + err error + keyPos int8 + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Name() string { + if len(cmd.args) == 0 { + return "" + } + // Cmd name must be lower cased. + return internal.ToLower(cmd.stringArg(0)) +} + +func (cmd *baseCmd) FullName() string { + switch name := cmd.Name(); name { + case "cluster", "command": + if len(cmd.args) == 1 { + return name + } + if s2, ok := cmd.args[1].(string); ok { + return name + " " + s2 + } + return name + default: + return name + } +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd.args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd.args) { + return "" + } + arg := cmd.args[pos] + switch v := arg.(type) { + case string: + return v + default: + // TODO: consider using appendArg + return fmt.Sprint(v) + } +} + +func (cmd *baseCmd) firstKeyPos() int8 { + return cmd.keyPos +} + +func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { + cmd.keyPos = keyPos +} + +func (cmd *baseCmd) SetErr(e error) { + cmd.err = e +} + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(ctx context.Context, args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) SetVal(val interface{}) { + cmd.val = val +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) Text() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + return toString(cmd.val) +} + +func toString(val interface{}) (string, error) { + switch val := val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toInt64(cmd.val) +} + +func toInt64(val interface{}) (int64, error) { + switch val := val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toUint64(cmd.val) +} + +func toUint64(val interface{}) (uint64, error) { + switch val := val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + 
return toFloat32(cmd.val) +} + +func toFloat32(val interface{}) (float32, error) { + switch val := val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toFloat64(cmd.val) +} + +func toFloat64(val interface{}) (float64, error) { + switch val := val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return toBool(cmd.val) +} + +func toBool(val interface{}) (bool, error) { + switch val := val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) Slice() ([]interface{}, error) { + if cmd.err != nil { + return nil, cmd.err + } + switch val := cmd.val.(type) { + case []interface{}: + return val, nil + default: + return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) + } +} + +func (cmd *Cmd) StringSlice() ([]string, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + ss := make([]string, len(slice)) + for i, iface := range slice { + val, err := toString(iface) + if err != nil { + return nil, err + } + ss[i] = val + } + return ss, nil +} + +func (cmd *Cmd) Int64Slice() ([]int64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]int64, len(slice)) + for i, iface := range slice { + val, err := toInt64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Uint64Slice() ([]uint64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]uint64, len(slice)) + for i, iface := range slice { + val, err := toUint64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Float32Slice() ([]float32, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float32, len(slice)) + for i, iface := range slice { + val, err := toFloat32(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) Float64Slice() ([]float64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float64, len(slice)) + for i, iface := range slice { + val, err := toFloat64(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) BoolSlice() ([]bool, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + bools := make([]bool, len(slice)) + for i, iface := range slice { + val, err := toBool(iface) + if err != nil { + return nil, err + } + bools[i] = val + } + return bools, nil +} + +func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadReply(sliceParser) + return err +} + +// sliceParser implements proto.MultiBulkParse. 
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, n) + for i := 0; i < len(vals); i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals[i] = nil + continue + } + if err, ok := err.(proto.RedisError); ok { + vals[i] = err + continue + } + return nil, err + } + vals[i] = v + } + return vals, nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SliceCmd) SetVal(val []interface{}) { + cmd.val = val +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *SliceCmd) Scan(dst interface{}) error { + if cmd.err != nil { + return cmd.err + } + + // Pass the list of keys and values. + // Skip the first two args for: HMGET key + var args []interface{} + if cmd.args[0] == "hmget" { + args = cmd.args[2:] + } else { + // Otherwise, it's: MGET field field ... + args = cmd.args[1:] + } + + return hscan.Scan(dst, args, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadArrayReply(sliceParser) + if err != nil { + return err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StatusCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntCmd) SetVal(val int64) { + cmd.val = val +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) Uint64() (uint64, error) { + return uint64(cmd.val), cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadIntReply() + return err +} + +//------------------------------------------------------------------------------ + +type IntSliceCmd struct { + baseCmd + + val []int64 +} + +var _ Cmder = (*IntSliceCmd)(nil) + +func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { + return &IntSliceCmd{ + baseCmd: baseCmd{ + ctx: 
ctx, + args: args, + }, + } +} + +func (cmd *IntSliceCmd) SetVal(val []int64) { + cmd.val = val +} + +func (cmd *IntSliceCmd) Val() []int64 { + return cmd.val +} + +func (cmd *IntSliceCmd) Result() ([]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]int64, n) + for i := 0; i < len(cmd.val); i++ { + num, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = num + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + precision: precision, + } +} + +func (cmd *DurationCmd) SetVal(val time.Duration) { + cmd.val = val +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadIntReply() + if err != nil { + return err + } + switch n { + // -2 if the key does not exist + // -1 if the key exists but has no associated expire + case -2, -1: + cmd.val = time.Duration(n) + default: + cmd.val = time.Duration(n) * cmd.precision + } + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TimeCmd) SetVal(val time.Time) { + cmd.val = val +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + cmd.val = time.Unix(sec, microsec*1000) + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolCmd) SetVal(val bool) { + cmd.val = val +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. 
But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + if err == Nil { + cmd.val = false + return nil + } + if err != nil { + return err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + return fmt.Errorf("got %T, wanted int64 or string", v) + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + +func (cmd *StringCmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return strconv.ParseBool(cmd.val) +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Time() (time.Time, error) { + if cmd.err != nil { + return time.Time{}, cmd.err + } + return time.Parse(time.RFC3339Nano, cmd.Val()) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatCmd) SetVal(val float64) { + cmd.val = val +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadFloatReply() + return err +} + +//------------------------------------------------------------------------------ + +type FloatSliceCmd struct { + baseCmd + + val []float64 +} + +var _ Cmder = (*FloatSliceCmd)(nil) + +func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { + return &FloatSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatSliceCmd) SetVal(val []float64) { + cmd.val = 
val +} + +func (cmd *FloatSliceCmd) Val() []float64 { + return cmd.val +} + +func (cmd *FloatSliceCmd) Result() ([]float64, error) { + return cmd.val, cmd.err +} + +func (cmd *FloatSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]float64, n) + for i := 0; i < len(cmd.val); i++ { + switch num, err := rd.ReadFloatReply(); { + case err == Nil: + cmd.val[i] = 0 + case err != nil: + return nil, err + default: + cmd.val[i] = num + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringSliceCmd) SetVal(val []string) { + cmd.val = val +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]string, n) + for i := 0; i < len(cmd.val); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.val[i] = "" + case err != nil: + return nil, err + default: + cmd.val[i] = s + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolSliceCmd) SetVal(val []bool) { + cmd.val = val +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]bool, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = n == 1 + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStringMapCmd) SetVal(val map[string]string) { + cmd.val = val +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + 
return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *StringStringMapCmd) Scan(dest interface{}) error { + if cmd.err != nil { + return cmd.err + } + + strct, err := hscan.Struct(dest) + if err != nil { + return err + } + + for k, v := range cmd.val { + if err := strct.Scan(k, v); err != nil { + return err + } + } + + return nil +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val[key] = value + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringIntMapCmd) SetVal(val map[string]int64) { + cmd.val = val +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + cmd.val[key] = n + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { + cmd.val = val +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + cmd.val[key] = struct{}{} + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { + return 
&XMessageSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { + cmd.val = val +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var err error + cmd.val, err = readXMessageSlice(rd) + return err +} + +func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + msgs := make([]XMessage, n) + for i := 0; i < n; i++ { + var err error + msgs[i], err = readXMessage(rd) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +func readXMessage(rd *proto.Reader) (XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return XMessage{}, err + } + if n != 2 { + return XMessage{}, fmt.Errorf("got %d, wanted 2", n) + } + + id, err := rd.ReadString() + if err != nil { + return XMessage{}, err + } + + var values map[string]interface{} + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + if err != proto.Nil { + return XMessage{}, err + } + } else { + values = v.(map[string]interface{}) + } + + return XMessage{ + ID: id, + Values: values, + }, nil +} + +// stringInterfaceMapParser implements proto.MultiBulkParse. +func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]interface{}, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XStreamSliceCmd) SetVal(val []XStream) { + cmd.val = val +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XStream, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + msgs, err := readXMessageSlice(rd) + if err != nil { + return nil, err + } + + cmd.val[i] = XStream{ + Stream: stream, + Messages: msgs, + } + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = 
(*XPendingCmd)(nil) + +func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingCmd) SetVal(val *XPending) { + cmd.val = val +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if cmd.val.Consumers == nil { + cmd.val.Consumers = make(map[string]int64) + } + cmd.val.Consumers[consumerName] = consumerPending + + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + if err != nil && err != Nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + ID string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { + cmd.val = val +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XPendingExt, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumer, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + idle, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + retryCount, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = append(cmd.val, XPendingExt{ + ID: id, + Consumer: consumer, + Idle: time.Duration(idle) * time.Millisecond, + RetryCount: retryCount, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + 
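+// Illustrative sketch (assumed reply shape; not part of the upstream file):
+// an extended XPENDING reply entry such as
+//
+//	1) "1526569498055-0"   (message ID)
+//	2) "consumer-1"        (consumer name)
+//	3) (integer) 72844     (idle time, in milliseconds)
+//	4) (integer) 1         (delivery count)
+//
+// is decoded by XPendingExtCmd.readReply above into
+//
+//	XPendingExt{
+//		ID:         "1526569498055-0",
+//		Consumer:   "consumer-1",
+//		Idle:       72844 * time.Millisecond,
+//		RetryCount: 1,
+//	}
+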
+//------------------------------------------------------------------------------ + +type XAutoClaimCmd struct { + baseCmd + + start string + val []XMessage +} + +var _ Cmder = (*XAutoClaimCmd)(nil) + +func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { + return &XAutoClaimCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + var err error + + cmd.start, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val, err = readXMessageSlice(rd) + if err != nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XAutoClaimJustIDCmd struct { + baseCmd + + start string + val []string +} + +var _ Cmder = (*XAutoClaimJustIDCmd)(nil) + +func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { + return &XAutoClaimJustIDCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimJustIDCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + var err error + + cmd.start, err = rd.ReadString() + if err != nil { + return nil, err + } + + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + cmd.val = make([]string, nn) + for i := 0; i < nn; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return nil, err + } + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XInfoConsumersCmd struct { + baseCmd + val []XInfoConsumer +} + +type XInfoConsumer struct { + Name string + Pending int64 + Idle int64 +} + +var _ Cmder = (*XInfoConsumersCmd)(nil) + +func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { + return &XInfoConsumersCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "consumers", stream, group}, + }, + } +} + +func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { + cmd.val = val +} + +func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { + return cmd.val +} + +func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoConsumersCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { + n, err := 
rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]XInfoConsumer, n) + + for i := 0; i < n; i++ { + cmd.val[i], err = readXConsumerInfo(rd) + if err != nil { + return err + } + } + + return nil +} + +func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) { + var consumer XInfoConsumer + + n, err := rd.ReadArrayLen() + if err != nil { + return consumer, err + } + if n != 6 { + return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n) + } + + for i := 0; i < 3; i++ { + key, err := rd.ReadString() + if err != nil { + return consumer, err + } + + val, err := rd.ReadString() + if err != nil { + return consumer, err + } + + switch key { + case "name": + consumer.Name = val + case "pending": + consumer.Pending, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return consumer, err + } + case "idle": + consumer.Idle, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return consumer, err + } + default: + return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) + } + } + + return consumer, nil +} + +//------------------------------------------------------------------------------ + +type XInfoGroupsCmd struct { + baseCmd + val []XInfoGroup +} + +type XInfoGroup struct { + Name string + Consumers int64 + Pending int64 + LastDeliveredID string +} + +var _ Cmder = (*XInfoGroupsCmd)(nil) + +func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { + return &XInfoGroupsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "groups", stream}, + }, + } +} + +func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { + cmd.val = val +} + +func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { + return cmd.val +} + +func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoGroupsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]XInfoGroup, n) + + for i := 0; i < n; i++ { + cmd.val[i], err = readXGroupInfo(rd) + if err != nil { + return err + } + } + + return nil +} + +func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) { + var group XInfoGroup + + n, err := rd.ReadArrayLen() + if err != nil { + return group, err + } + if n != 8 { + return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n) + } + + for i := 0; i < 4; i++ { + key, err := rd.ReadString() + if err != nil { + return group, err + } + + val, err := rd.ReadString() + if err != nil { + return group, err + } + + switch key { + case "name": + group.Name = val + case "consumers": + group.Consumers, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return group, err + } + case "pending": + group.Pending, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return group, err + } + case "last-delivered-id": + group.LastDeliveredID = val + default: + return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key) + } + } + + return group, nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamCmd struct { + baseCmd + val *XInfoStream +} + +type XInfoStream struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + Groups int64 + LastGeneratedID string + FirstEntry XMessage + LastEntry XMessage +} + +var _ Cmder = (*XInfoStreamCmd)(nil) + +func NewXInfoStreamCmd(ctx context.Context, stream string) 
*XInfoStreamCmd { + return &XInfoStreamCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "stream", stream}, + }, + } +} + +func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { + cmd.val = val +} + +func (cmd *XInfoStreamCmd) Val() *XInfoStream { + return cmd.val +} + +func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadReply(xStreamInfoParser) + if err != nil { + return err + } + cmd.val = v.(*XInfoStream) + return nil +} + +func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 14 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ + "wanted 14", n) + } + var info XInfoStream + for i := 0; i < 7; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + switch key { + case "length": + info.Length, err = rd.ReadIntReply() + case "radix-tree-keys": + info.RadixTreeKeys, err = rd.ReadIntReply() + case "radix-tree-nodes": + info.RadixTreeNodes, err = rd.ReadIntReply() + case "groups": + info.Groups, err = rd.ReadIntReply() + case "last-generated-id": + info.LastGeneratedID, err = rd.ReadString() + case "first-entry": + info.FirstEntry, err = readXMessage(rd) + if err == Nil { + err = nil + } + case "last-entry": + info.LastEntry, err = readXMessage(rd) + if err == Nil { + err = nil + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + if err != nil { + return nil, err + } + } + return &info, nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamFullCmd struct { + baseCmd + val *XInfoStreamFull +} + +type XInfoStreamFull struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + LastGeneratedID string + Entries []XMessage + Groups []XInfoStreamGroup +} + +type XInfoStreamGroup struct { + Name string + LastDeliveredID string + PelCount int64 + Pending []XInfoStreamGroupPending + Consumers []XInfoStreamConsumer +} + +type XInfoStreamGroupPending struct { + ID string + Consumer string + DeliveryTime time.Time + DeliveryCount int64 +} + +type XInfoStreamConsumer struct { + Name string + SeenTime time.Time + PelCount int64 + Pending []XInfoStreamConsumerPending +} + +type XInfoStreamConsumerPending struct { + ID string + DeliveryTime time.Time + DeliveryCount int64 +} + +var _ Cmder = (*XInfoStreamFullCmd)(nil) + +func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { + return &XInfoStreamFullCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { + cmd.val = val +} + +func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { + return cmd.val +} + +func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamFullCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + if n != 12 { + return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 12", n) + } + + cmd.val = &XInfoStreamFull{} + + for i := 0; i < 6; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "length": + cmd.val.Length, err = rd.ReadIntReply() + case 
"radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadIntReply() + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadIntReply() + case "last-generated-id": + cmd.val.LastGeneratedID, err = rd.ReadString() + case "entries": + cmd.val.Entries, err = readXMessageSlice(rd) + case "groups": + cmd.val.Groups, err = readStreamGroups(rd) + default: + return fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + if err != nil { + return err + } + } + return nil +} + +func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + groups := make([]XInfoStreamGroup, 0, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 10 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 10", nn) + } + + group := XInfoStreamGroup{} + + for f := 0; f < 5; f++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + case "pel-count": + group.PelCount, err = rd.ReadIntReply() + case "pending": + group.Pending, err = readXInfoStreamGroupPending(rd) + case "consumers": + group.Consumers, err = readXInfoStreamConsumers(rd) + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + + if err != nil { + return nil, err + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + pending := make([]XInfoStreamGroupPending, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 4 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 4", nn) + } + + p := XInfoStreamGroupPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + p.Consumer, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + + pending = append(pending, p) + } + + return pending, nil +} + +func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + consumers := make([]XInfoStreamConsumer, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 8 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 8", nn) + } + + c := XInfoStreamConsumer{} + + for f := 0; f < 4; f++ { + cKey, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch cKey { + case "name": + c.Name, err = rd.ReadString() + case "seen-time": + seen, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond)) + case "pel-count": + c.PelCount, err = rd.ReadIntReply() + case "pending": + pendingNumber, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) + + for pn := 
0; pn < pendingNumber; pn++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 3 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ + "wanted 3", nn) + } + + p := XInfoStreamConsumerPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + + c.Pending = append(c.Pending, p) + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", cKey) + } + if err != nil { + return nil, err + } + } + consumers = append(consumers, c) + } + + return consumers, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceCmd) SetVal(val []Z) { + cmd.val = val +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]Z, n/2) + for i := 0; i < len(cmd.val); i++ { + member, err := rd.ReadString() + if err != nil { + return nil, err + } + + score, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = Z{ + Member: member, + Score: score, + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val *ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { + cmd.val = val +} + +func (cmd *ZWithKeyCmd) Val() *ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 3 { + return nil, fmt.Errorf("got %d elements, expected 3", n) + } + + cmd.val = &ZWithKey{} + var err error + + cmd.val.Key, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process cmdable +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + process: process, + } +} + +func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { + 
cmd.page = page + cmd.cursor = cursor +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) { + cmd.page, cmd.cursor, err = rd.ReadScanReply() + return err +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + ID string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { + cmd.val = val +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]ClusterSlot, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].ID = id + } + } + + cmd.val[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
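+	// Store and StoreDist turn the query into a write command that
+	// saves the results under the given key (STORE / STOREDIST).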
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + return &GeoLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: geoLocationArgs(q, args...), + }, + q: q, + } +} + +func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return args +} + +func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { + cmd.locations = locations +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if err != nil { + return err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + // TODO: avoid copying + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +//------------------------------------------------------------------------------ + +// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. +type GeoSearchQuery struct { + Member string + + // Latitude and Longitude when using FromLonLat option. + Longitude float64 + Latitude float64 + + // Distance and unit when using ByRadius option. + // Can use m, km, ft, or mi. Default is km. 
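+	// A hedged example: GeoSearchQuery{Longitude: 15, Latitude: 37,
+	// Radius: 200, RadiusUnit: "km"} searches within 200 km of a point.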
+ Radius float64 + RadiusUnit string + + // Height, width and unit when using ByBox option. + // Can be m, km, ft, or mi. Default is km. + BoxWidth float64 + BoxHeight float64 + BoxUnit string + + // Can be ASC or DESC. Default is no sort order. + Sort string + Count int + CountAny bool +} + +type GeoSearchLocationQuery struct { + GeoSearchQuery + + WithCoord bool + WithDist bool + WithHash bool +} + +type GeoSearchStoreQuery struct { + GeoSearchQuery + + // When using the StoreDist option, the command stores the items in a + // sorted set populated with their distance from the center of the circle or box, + // as a floating-point number, in the same unit specified for that shape. + StoreDist bool +} + +func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { + args = geoSearchArgs(&q.GeoSearchQuery, args) + + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithHash { + args = append(args, "withhash") + } + + return args +} + +func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { + if q.Member != "" { + args = append(args, "frommember", q.Member) + } else { + args = append(args, "fromlonlat", q.Longitude, q.Latitude) + } + + if q.Radius > 0 { + if q.RadiusUnit == "" { + q.RadiusUnit = "km" + } + args = append(args, "byradius", q.Radius, q.RadiusUnit) + } else { + if q.BoxUnit == "" { + q.BoxUnit = "km" + } + args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) + } + + if q.Sort != "" { + args = append(args, q.Sort) + } + + if q.Count > 0 { + args = append(args, "count", q.Count) + if q.CountAny { + args = append(args, "any") + } + } + + return args +} + +type GeoSearchLocationCmd struct { + baseCmd + + opt *GeoSearchLocationQuery + val []GeoLocation +} + +var _ Cmder = (*GeoSearchLocationCmd)(nil) + +func NewGeoSearchLocationCmd( + ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, +) *GeoSearchLocationCmd { + return &GeoSearchLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + opt: opt, + } +} + +func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { + cmd.val = val +} + +func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { + return cmd.val +} + +func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { + return cmd.val, cmd.err +} + +func (cmd *GeoSearchLocationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]GeoLocation, n) + for i := 0; i < n; i++ { + _, err = rd.ReadArrayLen() + if err != nil { + return err + } + + var loc GeoLocation + + loc.Name, err = rd.ReadString() + if err != nil { + return err + } + if cmd.opt.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return err + } + } + if cmd.opt.WithHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return err + } + } + if cmd.opt.WithCoord { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn != 2 { + return fmt.Errorf("got %d coordinates, expected 2", nn) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return err + } + } + + cmd.val[i] = loc + } + + return nil +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + 
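// Members missing from the geo index yield nil entries in val,
+	// mirroring the null replies GEOPOS returns for unknown members.
+	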
baseCmd + + val []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *GeoPosCmd) SetVal(val []*GeoPos) { + cmd.val = val +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.val +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]*GeoPos, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + longitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + latitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = &GeoPos{ + Longitude: longitude, + Latitude: latitude, + } + return nil, nil + }) + if err != nil { + if err == Nil { + cmd.val[i] = nil + continue + } + return nil, err + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { + cmd.val = val +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + cmd.val[vv.Name] = vv + } + return nil, nil + }) + return err +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + const numArgRedis5 = 6 + const numArgRedis6 = 7 + + switch n { + case numArgRedis5, numArgRedis6: + // continue + default: + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n) + } + + var cmd CommandInfo + var err error + + cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.Flags = make([]string, n) + for i := 0; i < len(cmd.Flags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.Flags[i] = "" + case err != nil: + return nil, err + default: + cmd.Flags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := 
rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + if n == numArgRedis5 { + return &cmd, nil + } + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.ACLFlags = make([]string, n) + for i := 0; i < len(cmd.ACLFlags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.ACLFlags[i] = "" + case err != nil: + return nil, err + default: + cmd.ACLFlags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func(ctx context.Context) (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn(ctx) + if err != nil { + return err + } + + // Extensions have cmd names in upper case. Convert them to lower case. + for k, v := range cmds { + lower := internal.ToLower(k) + if lower != k { + cmds[lower] = v + } + } + + c.cmds = cmds + return nil + }) + return c.cmds, err +} + +//------------------------------------------------------------------------------ + +type SlowLog struct { + ID int64 + Time time.Time + Duration time.Duration + Args []string + // These are also optional fields emitted only by Redis 4.0 or greater: + // https://redis.io/commands/slowlog#output-format + ClientAddr string + ClientName string +} + +type SlowLogCmd struct { + baseCmd + + val []SlowLog +} + +var _ Cmder = (*SlowLogCmd)(nil) + +func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { + return &SlowLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SlowLogCmd) SetVal(val []SlowLog) { + cmd.val = val +} + +func (cmd *SlowLogCmd) Val() []SlowLog { + return cmd.val +} + +func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *SlowLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]SlowLog, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 4 { + err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n) + return nil, err + } + + id, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + createdAt, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + createdAtTime := time.Unix(createdAt, 0) + + costs, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + costsDuration := time.Duration(costs) * time.Microsecond + + cmdLen, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if cmdLen < 1 { + err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) + return nil, err + } + + cmdString := make([]string, cmdLen) + for i := 0; i < cmdLen; i++ { + cmdString[i], err = rd.ReadString() + if err != nil { 
+ return nil, err + } + } + + var address, name string + for i := 4; i < n; i++ { + str, err := rd.ReadString() + if err != nil { + return nil, err + } + if i == 4 { + address = str + } else if i == 5 { + name = str + } + } + + cmd.val[i] = SlowLog{ + ID: id, + Time: createdAtTime, + Duration: costsDuration, + Args: cmdString, + ClientAddr: address, + ClientName: name, + } + } + return nil, nil + }) + return err +} diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go new file mode 100644 index 0000000..168f9f6 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go @@ -0,0 +1,96 @@ +package redis_test + +import ( + "errors" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + redis "github.com/go-redis/redis/v8" +) + +var _ = Describe("Cmd", func() { + var client *redis.Client + + BeforeEach(func() { + client = redis.NewClient(redisOptions()) + Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("implements Stringer", func() { + set := client.Set(ctx, "foo", "bar", 0) + Expect(set.String()).To(Equal("set foo bar: OK")) + + get := client.Get(ctx, "foo") + Expect(get.String()).To(Equal("get foo: bar")) + }) + + It("has val/err", func() { + set := client.Set(ctx, "key", "hello", 0) + Expect(set.Err()).NotTo(HaveOccurred()) + Expect(set.Val()).To(Equal("OK")) + + get := client.Get(ctx, "key") + Expect(get.Err()).NotTo(HaveOccurred()) + Expect(get.Val()).To(Equal("hello")) + + Expect(set.Err()).NotTo(HaveOccurred()) + Expect(set.Val()).To(Equal("OK")) + }) + + It("has helpers", func() { + set := client.Set(ctx, "key", "10", 0) + Expect(set.Err()).NotTo(HaveOccurred()) + + n, err := client.Get(ctx, "key").Int64() + Expect(err).NotTo(HaveOccurred()) + Expect(n).To(Equal(int64(10))) + + un, err := client.Get(ctx, "key").Uint64() + Expect(err).NotTo(HaveOccurred()) + Expect(un).To(Equal(uint64(10))) + + f, err := client.Get(ctx, "key").Float64() + Expect(err).NotTo(HaveOccurred()) + Expect(f).To(Equal(float64(10))) + }) + + It("supports float32", func() { + f := float32(66.97) + + err := client.Set(ctx, "float_key", f, 0).Err() + Expect(err).NotTo(HaveOccurred()) + + val, err := client.Get(ctx, "float_key").Float32() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(Equal(f)) + }) + + It("supports time.Time", func() { + tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC) + + err := client.Set(ctx, "time_key", tm, 0).Err() + Expect(err).NotTo(HaveOccurred()) + + s, err := client.Get(ctx, "time_key").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(s).To(Equal("2019-01-01T09:45:10.000222125Z")) + + tm2, err := client.Get(ctx, "time_key").Time() + Expect(err).NotTo(HaveOccurred()) + Expect(tm2).To(BeTemporally("==", tm)) + }) + + It("allows to set custom error", func() { + e := errors.New("custom error") + cmd := redis.Cmd{} + cmd.SetErr(e) + _, err := cmd.Result() + Expect(err).To(Equal(e)) + }) +}) diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go new file mode 100644 index 0000000..bbfe089 --- /dev/null +++ b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go @@ -0,0 +1,3475 @@ +package redis + +import ( + "context" + "errors" + "io" + "time" + + "github.com/go-redis/redis/v8/internal" +) + +// KeepTTL is a 
Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +// For example: +// +// rdb.Set(ctx, key, value, redis.KeepTTL) +const KeepTTL = -1 + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1ms", + dur, time.Millisecond, + ) + return 1 + } + return int64(dur / time.Millisecond) +} + +func formatSec(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1s", + dur, time.Second, + ) + return 1 + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + return appendArg(dst, src[0]) + } + + dst = append(dst, src...) + return dst +} + +func appendArg(dst []interface{}, arg interface{}) []interface{} { + switch arg := arg.(type) { + case []string: + for _, s := range arg { + dst = append(dst, s) + } + return dst + case []interface{}: + dst = append(dst, arg...) + return dst + case map[string]interface{}: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + case map[string]string: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + default: + return append(dst, arg) + } +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command(ctx context.Context) *CommandsInfoCmd + ClientGetName(ctx context.Context) *StringCmd + Echo(ctx context.Context, message interface{}) *StringCmd + Ping(ctx context.Context) *StatusCmd + Quit(ctx context.Context) *StatusCmd + Del(ctx context.Context, keys ...string) *IntCmd + Unlink(ctx context.Context, keys ...string) *IntCmd + Dump(ctx context.Context, key string) *StringCmd + Exists(ctx context.Context, keys ...string) *IntCmd + Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + Keys(ctx context.Context, pattern string) *StringSliceCmd + Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd + Move(ctx context.Context, key string, db int) *BoolCmd + ObjectRefCount(ctx context.Context, key string) *IntCmd + ObjectEncoding(ctx context.Context, key string) *StringCmd + ObjectIdleTime(ctx context.Context, key string) *DurationCmd + Persist(ctx context.Context, key string) *BoolCmd + PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PTTL(ctx context.Context, key string) *DurationCmd + RandomKey(ctx context.Context) *StringCmd + Rename(ctx context.Context, key, newkey string) *StatusCmd + RenameNX(ctx context.Context, key, newkey string) *BoolCmd + 
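// Restore and RestoreReplace load a value previously serialized with
+	// Dump; a zero ttl restores the key without an expiration.
+	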
Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd + SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd + Touch(ctx context.Context, keys ...string) *IntCmd + TTL(ctx context.Context, key string) *DurationCmd + Type(ctx context.Context, key string) *StatusCmd + Append(ctx context.Context, key, value string) *IntCmd + Decr(ctx context.Context, key string) *IntCmd + DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + Get(ctx context.Context, key string) *StringCmd + GetRange(ctx context.Context, key string, start, end int64) *StringCmd + GetSet(ctx context.Context, key string, value interface{}) *StringCmd + GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd + GetDel(ctx context.Context, key string) *StringCmd + Incr(ctx context.Context, key string) *IntCmd + IncrBy(ctx context.Context, key string, value int64) *IntCmd + IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd + MGet(ctx context.Context, keys ...string) *SliceCmd + MSet(ctx context.Context, values ...interface{}) *StatusCmd + MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd + // TODO: rename to SetEx + SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd + StrLen(ctx context.Context, key string) *IntCmd + Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd + + GetBit(ctx context.Context, key string, offset int64) *IntCmd + SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd + BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd + BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpNot(ctx context.Context, destKey string, key string) *IntCmd + BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd + + Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd + ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd + SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + + HDel(ctx context.Context, key string, fields ...string) *IntCmd + HExists(ctx context.Context, key, field string) *BoolCmd + HGet(ctx context.Context, key, field string) *StringCmd + HGetAll(ctx context.Context, key string) *StringStringMapCmd + HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd + 
HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd + HKeys(ctx context.Context, key string) *StringSliceCmd + HLen(ctx context.Context, key string) *IntCmd + HMGet(ctx context.Context, key string, fields ...string) *SliceCmd + HSet(ctx context.Context, key string, values ...interface{}) *IntCmd + HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd + HVals(ctx context.Context, key string) *StringSliceCmd + HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd + + BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LIndex(ctx context.Context, key string, index int64) *StringCmd + LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LLen(ctx context.Context, key string) *IntCmd + LPop(ctx context.Context, key string) *StringCmd + LPopCount(ctx context.Context, key string, count int) *StringSliceCmd + LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd + LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd + LPush(ctx context.Context, key string, values ...interface{}) *IntCmd + LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd + LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd + LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd + RPop(ctx context.Context, key string) *StringCmd + RPopCount(ctx context.Context, key string, count int) *StringSliceCmd + RPopLPush(ctx context.Context, source, destination string) *StringCmd + RPush(ctx context.Context, key string, values ...interface{}) *IntCmd + RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd + BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd + + SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd + SCard(ctx context.Context, key string) *IntCmd + SDiff(ctx context.Context, keys ...string) *StringSliceCmd + SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd + SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd + SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd + SMembers(ctx context.Context, key string) *StringSliceCmd + SMembersMap(ctx context.Context, key string) *StringStructMapCmd + SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd + SPop(ctx context.Context, key string) *StringCmd + SPopN(ctx context.Context, key string, count int64) *StringSliceCmd + SRandMember(ctx context.Context, key string) *StringCmd + SRandMemberN(ctx context.Context, key string, count int64) 
*StringSliceCmd + SRem(ctx context.Context, key string, members ...interface{}) *IntCmd + SUnion(ctx context.Context, keys ...string) *StringSliceCmd + SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd + + XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XLen(ctx context.Context, stream string) *IntCmd + XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd + XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd + XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd + XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd + XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd + XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd + XGroupDestroy(ctx context.Context, stream, group string) *IntCmd + XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd + XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd + XPending(ctx context.Context, stream, group string) *XPendingCmd + XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd + XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd + XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd + XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd + + // TODO: XTrim and XTrimApprox remove in v9. + XTrim(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd + XTrimMinID(ctx context.Context, key string, minID string) *IntCmd + XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd + XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd + XInfoStream(ctx context.Context, key string) *XInfoStreamCmd + XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd + XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd + + BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + + // TODO: remove + // ZAddCh + // ZIncr + // ZAddNXCh + // ZAddXXCh + // ZIncrNX + // ZIncrXX + // in v9. + // use ZAddArgs and ZAddArgsIncr. 
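+	// A hedged sketch of the replacement API (rdb stands for any client
+	// implementing Cmdable; the GT option requires Redis >= 6.2):
+	//
+	//	rdb.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+	//		GT:      true,
+	//		Members: []redis.Z{{Score: 1, Member: "a"}},
+	//	})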
+ + ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd + ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd + ZIncr(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd + ZCard(ctx context.Context, key string) *IntCmd + ZCount(ctx context.Context, key, min, max string) *IntCmd + ZLexCount(ctx context.Context, key, min, max string) *IntCmd + ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd + ZInter(ctx context.Context, store *ZStore) *StringSliceCmd + ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd + ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd + ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd + ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd + ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd + ZRank(ctx context.Context, key, member string) *IntCmd + ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd + ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd + ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd + ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd + ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRevRank(ctx context.Context, key, member string) *IntCmd + ZScore(ctx context.Context, key, member string) *FloatCmd + ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd + ZUnion(ctx context.Context, store ZStore) *StringSliceCmd + ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd + ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd + ZDiff(ctx context.Context, keys ...string) *StringSliceCmd + ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd + ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + + PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd + PFCount(ctx context.Context, keys ...string) *IntCmd + PFMerge(ctx context.Context, dest string, 
keys ...string) *StatusCmd + + BgRewriteAOF(ctx context.Context) *StatusCmd + BgSave(ctx context.Context) *StatusCmd + ClientKill(ctx context.Context, ipPort string) *StatusCmd + ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd + ClientList(ctx context.Context) *StringCmd + ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientID(ctx context.Context) *IntCmd + ConfigGet(ctx context.Context, parameter string) *SliceCmd + ConfigResetStat(ctx context.Context) *StatusCmd + ConfigSet(ctx context.Context, parameter, value string) *StatusCmd + ConfigRewrite(ctx context.Context) *StatusCmd + DBSize(ctx context.Context) *IntCmd + FlushAll(ctx context.Context) *StatusCmd + FlushAllAsync(ctx context.Context) *StatusCmd + FlushDB(ctx context.Context) *StatusCmd + FlushDBAsync(ctx context.Context) *StatusCmd + Info(ctx context.Context, section ...string) *StringCmd + LastSave(ctx context.Context) *IntCmd + Save(ctx context.Context) *StatusCmd + Shutdown(ctx context.Context) *StatusCmd + ShutdownSave(ctx context.Context) *StatusCmd + ShutdownNoSave(ctx context.Context) *StatusCmd + SlaveOf(ctx context.Context, host, port string) *StatusCmd + Time(ctx context.Context) *TimeCmd + DebugObject(ctx context.Context, key string) *StringCmd + ReadOnly(ctx context.Context) *StatusCmd + ReadWrite(ctx context.Context) *StatusCmd + MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + + Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd + ScriptFlush(ctx context.Context) *StatusCmd + ScriptKill(ctx context.Context) *StatusCmd + ScriptLoad(ctx context.Context, script string) *StringCmd + + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd + PubSubNumPat(ctx context.Context) *IntCmd + + ClusterSlots(ctx context.Context) *ClusterSlotsCmd + ClusterNodes(ctx context.Context) *StringCmd + ClusterMeet(ctx context.Context, host, port string) *StatusCmd + ClusterForget(ctx context.Context, nodeID string) *StatusCmd + ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd + ClusterResetSoft(ctx context.Context) *StatusCmd + ClusterResetHard(ctx context.Context) *StatusCmd + ClusterInfo(ctx context.Context) *StringCmd + ClusterKeySlot(ctx context.Context, key string) *IntCmd + ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd + ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd + ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd + ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd + ClusterSaveConfig(ctx context.Context) *StatusCmd + ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd + ClusterFailover(ctx context.Context) *StatusCmd + ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd + + GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd + GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusStore(ctx context.Context, key string, 
longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+	GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+	GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+	GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+	GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+	GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+	Cmdable
+	Auth(ctx context.Context, password string) *StatusCmd
+	AuthACL(ctx context.Context, username, password string) *StatusCmd
+	Select(ctx context.Context, index int) *StatusCmd
+	SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+	ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+	_ Cmdable = (*Client)(nil)
+	_ Cmdable = (*Tx)(nil)
+	_ Cmdable = (*Ring)(nil)
+	_ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", password)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// AuthACL performs an AUTH command with the given username and password.
+// It should be used to authenticate the current connection as one of the users
+// defined in the ACL list when connecting to a Redis instance of version 6.0
+// or greater that uses the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", username, password)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+	cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "select", index)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "client", "setname", name)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+	cmd := NewCommandsInfoCmd(ctx, "command")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientGetName returns the name of the connection.
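+// A minimal usage sketch (rdb is an assumed *redis.Client):
+//
+//	name, err := rdb.ClientGetName(ctx).Result()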
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "client", "getname") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "echo", message) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Ping(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "ping") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Quit(_ context.Context) *StatusCmd { + panic("not implemented") +} + +func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "dump", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "") +} + +func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "NX") +} + +func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "XX") +} + +func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "GT") +} + +func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "LT") +} + +func (c cmdable) expire( + ctx context.Context, key string, expiration time.Duration, mode string, +) *BoolCmd { + args := make([]interface{}, 3, 4) + args[0] = "expire" + args[1] = key + args[2] = formatSec(ctx, expiration) + if mode != "" { + args = append(args, mode) + } + + cmd := NewBoolCmd(ctx, args...) 
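+	// NOTE: the NX/XX/GT/LT modes appended above are only understood by
+	// Redis >= 7.0; an empty mode issues a plain EXPIRE.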
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "keys", pattern) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "migrate", + host, + port, + key, + db, + formatMs(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { + cmd := NewBoolCmd(ctx, "move", key, db) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "object", "refcount", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "object", "encoding", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { + cmd := NewBoolCmd(ctx, "persist", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + ctx, + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RandomKey(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "randomkey") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { + cmd := NewStatusCmd(ctx, "rename", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { + cmd := NewBoolCmd(ctx, "renamenx", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + "replace", + ) + _ = c(ctx, cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args 
+} + +func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "ttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { + cmd := NewStatusCmd(ctx, "type", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { + cmd := NewIntCmd(ctx, "append", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "decr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { + cmd := NewIntCmd(ctx, "decrby", key, decrement) + _ = c(ctx, cmd) + return cmd +} + +// Get Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c cmdable) Get(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "get", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { + cmd := NewStringCmd(ctx, "getrange", key, start, end) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "getset", key, value) + _ = c(ctx, cmd) + return cmd +} + +// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist). +// Requires Redis >= 6.2.0. +func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd { + args := make([]interface{}, 0, 4) + args = append(args, "getex", key) + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == 0 { + args = append(args, "persist") + } + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// GetDel redis-server version >= 6.2.0. 
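+// A minimal usage sketch (rdb is an assumed *redis.Client):
+//
+//	old, err := rdb.GetDel(ctx, "key").Result()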
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "getdel", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "incr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { + cmd := NewIntCmd(ctx, "incrby", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSet is like Set but accepts multiple values: +// - MSet("key1", "value1", "key2", "value2") +// - MSet([]string{"key1", "value1", "key2", "value2"}) +// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "mset" + args = appendArgs(args, values) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSetNX is like SetNX but accepts multiple values: +// - MSetNX("key1", "value1", "key2", "value2") +// - MSetNX([]string{"key1", "value1", "key2", "value2"}) +// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "msetnx" + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Set Redis `SET key value [expiration]` command. +// Use expiration for `SETEX`-like behavior. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 5) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetArgs provides arguments for the SetArgs function. +type SetArgs struct { + // Mode can be `NX` or `XX` or empty. + Mode string + + // Zero `TTL` or `Expiration` means that the key has no expiration time. + TTL time.Duration + ExpireAt time.Time + + // When Get is true, the command returns the old value stored at key, or nil when key did not exist. + Get bool + + // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, + // otherwise you will receive an error: (error) ERR syntax error. + KeepTTL bool +} + +// SetArgs supports all the options that the SET command supports. +// It is the alternative to the Set function when you want +// to have more control over the options. 
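+// A hedged usage sketch (rdb is an assumed *redis.Client; the Get option
+// requires Redis >= 6.2):
+//
+//	prev, err := rdb.SetArgs(ctx, "key", "value", redis.SetArgs{
+//		TTL: time.Minute,
+//		Get: true,
+//	}).Result()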
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if a.KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	if !a.ExpireAt.IsZero() {
+		args = append(args, "exat", a.ExpireAt.Unix())
+	}
+	if a.TTL > 0 {
+		if usePrecise(a.TTL) {
+			args = append(args, "px", formatMs(ctx, a.TTL))
+		} else {
+			args = append(args, "ex", formatSec(ctx, a.TTL))
+		}
+	}
+
+	if a.Mode != "" {
+		args = append(args, a.Mode)
+	}
+
+	if a.Get {
+		args = append(args, "get")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetEX Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is the Redis KEEPTTL option to keep the existing TTL; it requires
+// redis-server version >= 6.0, otherwise the server returns the error:
+// (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	switch expiration {
+	case 0:
+		// Use old `SETNX` to support old Redis versions.
+		cmd = NewBoolCmd(ctx, "setnx", key, value)
+	case KeepTTL:
+		cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+	default:
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+		} else {
+			cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+		}
+	}
+
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is the Redis KEEPTTL option to keep the existing TTL; it requires
+// redis-server version >= 6.0, otherwise the server returns the error:
+// (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	switch expiration {
+	case 0:
+		cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+	case KeepTTL:
+		cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+	default:
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+		} else {
+			cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+		}
+	}
+
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+	cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "strlen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+	args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+	if replace {
+		args = append(args, "REPLACE")
+	}
+	cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { + cmd := NewIntCmd(ctx, "getbit", key, offset) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + ctx, + "setbit", + key, + offset, + value, + ) + _ = c(ctx, cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "and", destKey, keys...) +} + +func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "or", destKey, keys...) +} + +func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "xor", destKey, keys...) +} + +func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { + return c.bitOp(ctx, "not", destKey, key) +} + +func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { + a := make([]interface{}, 0, 2+len(args)) + a = append(a, "bitfield") + a = append(a, key) + a = append(a, args...) + cmd := NewIntSliceCmd(ctx, a...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + if keyType != "" { + args = append(args, "type", keyType) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) 
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"hscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"zscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hdel"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "hexists", key, field)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+	cmd := NewStringCmd(ctx, "hget", key, field)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "hkeys", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "hlen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// The values are returned as interface{} so that an empty string can be
+// distinguished from a nil (missing field) value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hmget"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HSet accepts values in the following formats:
+//   - HSet("myhash", "key1", "value1", "key2", "value2")
+//   - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//   - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that passing multiple field/value pairs requires Redis v4 or newer.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "hset"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
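+// On Redis 4 and newer, prefer HSet; for example (illustrative):
+//
+//	rdb.HSet(ctx, "myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})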
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hmset" + args[1] = key + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hvals", key) + _ = c(ctx, cmd) + return cmd +} + +// HRandField redis-server version >= 6.2.0. +func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd { + args := make([]interface{}, 0, 4) + + // Although count=0 is meaningless, redis accepts count=0. + args = append(args, "hrandfield", key, count) + if withValues { + args = append(args, "withvalues") + } + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) 
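+	// BRPOP can legitimately block for up to timeout on the server, so the
+	// connection's read deadline is extended to avoid a premature timeout.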
+ cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + ctx, + "brpoplpush", + source, + destination, + formatSec(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { + cmd := NewStringCmd(ctx, "lindex", key, index) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "llen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "lpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "lpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +type LPosArgs struct { + Rank, MaxLen int64 +} + +func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { + args := []interface{}{"lpos", key, value} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { + args := []interface{}{"lpos", key, value, "count", count} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + ctx, + "lrange", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "lrem", key, count, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "lset", key, index, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "ltrim", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "rpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "rpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { + cmd := NewStringCmd(ctx, "rpoplpush", source, destination) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { + cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BLMove( + ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, +) *StringCmd { + cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "scard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "sismember", key, member) + _ = c(ctx, cmd) + return cmd +} + +// SMIsMember Redis `SMISMEMBER key member [member ...]` command. +func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "smismember" + args[1] = key + args = appendArgs(args, members) + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SMembers Redis `SMEMBERS key` command output as a slice. +func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +// SMembersMap Redis `SMEMBERS key` command output as a map. +func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "smove", source, destination, member) + _ = c(ctx, cmd) + return cmd +} + +// SPop Redis `SPOP key` command. +func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "spop", key) + _ = c(ctx, cmd) + return cmd +} + +// SPopN Redis `SPOP key count` command. +func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "spop", key, count) + _ = c(ctx, cmd) + return cmd +} + +// SRandMember Redis `SRANDMEMBER key` command. +func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "srandmember", key) + _ = c(ctx, cmd) + return cmd +} + +// SRandMemberN Redis `SRANDMEMBER key count` command. +func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "srandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+//   - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of the key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+	Stream     string
+	NoMkStream bool
+	MaxLen     int64 // MAXLEN N
+
+	// Deprecated: use MaxLen+Approx, remove in v9.
+	MaxLenApprox int64 // MAXLEN ~ N
+
+	MinID string
+	// Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+	Approx bool
+	Limit  int64
+	ID     string
+	Values interface{}
+}
+
+// XAdd a.Limit is affected by a server-side bug; verify the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+	args := make([]interface{}, 0, 11)
+	args = append(args, "xadd", a.Stream)
+	if a.NoMkStream {
+		args = append(args, "nomkstream")
+	}
+	switch {
+	case a.MaxLen > 0:
+		if a.Approx {
+			args = append(args, "maxlen", "~", a.MaxLen)
+		} else {
+			args = append(args, "maxlen", a.MaxLen)
+		}
+	case a.MaxLenApprox > 0:
+		// TODO remove in v9.
+		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	case a.MinID != "":
+		if a.Approx {
+			args = append(args, "minid", "~", a.MinID)
+		} else {
+			args = append(args, "minid", a.MinID)
+		}
+	}
+	if a.Limit > 0 {
+		args = append(args, "limit", a.Limit)
+	}
+	if a.ID != "" {
+		args = append(args, a.ID)
+	} else {
+		args = append(args, "*")
+	}
+	args = appendArg(args, a.Values)
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xlen", stream)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadArgs struct {
+	Streams []string // list of streams and ids, e.g.
stream1 stream2 id1 id2 + Count int64 + Block time.Duration +} + +func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 6+len(a.Streams)) + args = append(args, "xread") + + keyPos := int8(1) + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + keyPos += 2 + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { + return c.XRead(ctx, &XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 + Count int64 + Block time.Duration + NoAck bool +} + +func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 10+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + + keyPos := int8(4) + if a.Count > 0 { + args = append(args, "count", a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + keyPos += 2 + } + if a.NoAck { + args = append(args, "noack") + keyPos++ + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(ctx, args...) 
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+	cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XPendingExtArgs struct {
+	Stream   string
+	Group    string
+	Idle     time.Duration
+	Start    string
+	End      string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "xpending", a.Stream, a.Group)
+	if a.Idle != 0 {
+		args = append(args, "idle", formatMs(ctx, a.Idle))
+	}
+	args = append(args, a.Start, a.End, a.Count)
+	if a.Consumer != "" {
+		args = append(args, a.Consumer)
+	}
+	cmd := NewXPendingExtCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XAutoClaimArgs struct {
+	Stream   string
+	Group    string
+	MinIdle  time.Duration
+	Start    string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+	args := xAutoClaimArgs(ctx, a)
+	cmd := NewXAutoClaimCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+	args := xAutoClaimArgs(ctx, a)
+	args = append(args, "justid")
+	cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 8)
+	args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	return args
+}
+
+type XClaimArgs struct {
+	Stream   string
+	Group    string
+	Consumer string
+	MinIdle  time.Duration
+	Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+	args := xClaimArgs(a)
+	cmd := NewXMessageSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+	args := xClaimArgs(a)
+	args = append(args, "justid")
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 5+len(a.Messages))
+	args = append(args,
+		"xclaim",
+		a.Stream,
+		a.Group, a.Consumer,
+		int64(a.MinIdle/time.Millisecond))
+	for _, id := range a.Messages {
+		args = append(args, id)
+	}
+	return args
+}
+
+// xTrim adds the "~" modifier when approx is true; otherwise the exact "="
+// form (the redis default) is used.
+// example:
+//	XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//	XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+// If the redis-server version is lower than 6.2, limit must be set to 0.
+func (c cmdable) xTrim(
+	ctx context.Context, key, strategy string,
+	approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xtrim", key, strategy)
+	if approx {
+		args = append(args, "~")
+	}
+	args = append(args, threshold)
+	if limit > 0 {
+		args = append(args, "limit", limit)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Deprecated: use XTrimMaxLen, remove in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox, remove in v9.
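+// XTrimApprox(ctx, key, maxLen) is equivalent to XTrimMaxLenApprox(ctx, key, maxLen, 0).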
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen uses the exact trimming form (no `~` modifier), so `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox LIMIT is affected by a server-side bug; verify the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID uses the exact trimming form (no `~` modifier), so `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+	return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox LIMIT is affected by a server-side bug; verify the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+	cmd := NewXInfoConsumersCmd(ctx, key, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+	cmd := NewXInfoGroupsCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+	cmd := NewXInfoStreamCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+	args := make([]interface{}, 0, 6)
+	args = append(args, "xinfo", "stream", key, "full")
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewXInfoStreamFullCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZWithKey represents a sorted set member together with the name of the key it was popped from.
+type ZWithKey struct {
+	Z
+	Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+	Keys    []string
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+	n = len(z.Keys)
+	if len(z.Weights) > 0 {
+		n += 1 + len(z.Weights)
+	}
+	if z.Aggregate != "" {
+		n += 2
+	}
+	return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+	for _, key := range z.Keys {
+		args = append(args, key)
+	}
+	if len(z.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range z.Weights {
+			args = append(args, weight)
+		}
+	}
+	if z.Aggregate != "" {
+		args = append(args, "aggregate", z.Aggregate)
+	}
+	return args
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
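+//
+// A usage sketch (illustrative only; `rdb` is assumed to be a *redis.Client):
+//
+//	res, err := rdb.BZPopMax(ctx, time.Second, "jobs").Result()
+//	if err == nil {
+//		// res.Key names the source key; res.Member and res.Score are the popped entry.
+//	}
+//
+// A zero timeout blocks indefinitely.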
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmax" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command. +func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmin" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. +type ZAddArgs struct { + NX bool + XX bool + LT bool + GT bool + Ch bool + Members []Z +} + +func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} { + a := make([]interface{}, 0, 6+2*len(args.Members)) + a = append(a, "zadd", key) + + // The GT, LT and NX options are mutually exclusive. + if args.NX { + a = append(a, "nx") + } else { + if args.XX { + a = append(a, "xx") + } + if args.GT { + a = append(a, "gt") + } else if args.LT { + a = append(a, "lt") + } + } + if args.Ch { + a = append(a, "ch") + } + if incr { + a = append(a, "incr") + } + for _, m := range args.Members { + a = append(a, m.Score) + a = append(a, m.Member) + } + return a +} + +func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd { + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd { + cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...) + _ = c(ctx, cmd) + return cmd +} + +// TODO: Compatible with v8 api, will be removed in v9. +func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd { + args.Members = make([]Z, len(members)) + for i, m := range members { + args.Members[i] = *m + } + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +// ZAdd Redis `ZADD key score member [score member ...]` command. +func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{}, members...) +} + +// ZAddNX Redis `ZADD key NX score member [score member ...]` command. +func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + NX: true, + }, members...) +} + +// ZAddXX Redis `ZADD key XX score member [score member ...]` command. +func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + XX: true, + }, members...) +} + +// ZAddCh Redis `ZADD key CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// Ch: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + Ch: true, + }, members...) +} + +// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// NX: true, +// Ch: true, +// Members: []Z, +// }) +// remove in v9. 
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + NX: true, + Ch: true, + }, members...) +} + +// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// XX: true, +// Ch: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + XX: true, + Ch: true, + }, members...) +} + +// ZIncr Redis `ZADD key INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + Members: []Z{*member}, + }) +} + +// ZIncrNX Redis `ZADD key NX INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// NX: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + NX: true, + Members: []Z{*member}, + }) +} + +// ZIncrXX Redis `ZADD key XX INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// XX: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + XX: true, + Members: []Z{*member}, + }) +} + +func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "zcard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zlexcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zincrby", key, increment, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinterstore", destination, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "zmscore" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewFloatSliceCmd(ctx, args...) 
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmax",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRangeArgs holds all the options of the ZRange command.
+// On redis-server >= 6.2.0 it can replace the following commands:
+//	ZREVRANGE,
+//	ZRANGEBYSCORE,
+//	ZREVRANGEBYSCORE,
+//	ZRANGEBYLEX,
+//	ZREVRANGEBYLEX.
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, an open interval (exclusive) can be set.
+	// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+	// It is similar to the ZRangeByScore command (deprecated since 6.2.0).
+	// For example:
+	//	ZRangeArgs{
+	//		Key:     "example-key",
+	//		Start:   "(3",
+	//		Stop:    8,
+	//		ByScore: true,
+	//	}
+	// cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+	//
+	// For the ByLex option, it is similar to the ZRangeByLex command (deprecated since 6.2.0).
+	// You can set the <Start> and <Stop> options as follows:
+	//	ZRangeArgs{
+	//		Key:   "example-key",
+	//		Start: "[abc",
+	//		Stop:  "(def",
+	//		ByLex: true,
+	//	}
+	// cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), <Start> and <Stop>
+	// should be set to the index range (int).
+	// You can read the documentation for more information: https://redis.io/commands/zrange
+	Start interface{}
+	Stop  interface{}
+
+	// The ByScore and ByLex options are mutually exclusive.
+	ByScore bool
+	ByLex   bool
+
+	Rev bool
+
+	// limit offset count.
+	Offset int64
+	Count  int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+	if z.Rev && (z.ByScore || z.ByLex) {
+		args = append(args, z.Key, z.Stop, z.Start)
+	} else {
+		args = append(args, z.Key, z.Start, z.Stop)
+	}
+
+	if z.ByScore {
+		args = append(args, "byscore")
+	} else if z.ByLex {
+		args = append(args, "bylex")
+	}
+	if z.Rev {
+		args = append(args, "rev")
+	}
+	if z.Offset != 0 || z.Count != 0 {
+		args = append(args, "limit", z.Offset, z.Count)
+	}
+	return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + return c.ZRangeArgs(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + return c.ZRangeArgsWithScores(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) +} + +func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebylex", key, opt, false) +} + +func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd { + args := make([]interface{}, 0, 10) + args = append(args, "zrangestore", dst) + args = z.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + ctx, + "zremrangebyrank", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) +} + +func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) +} + +func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrevrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zscore", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunionstore", dest, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +// ZRandMember redis-server version >= 6.2.0. 
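+//
+// A usage sketch (illustrative only; `rdb` is assumed to be a *redis.Client).
+// With withScores set, the reply interleaves members and scores in the
+// returned slice:
+//
+//	vals, err := rdb.ZRandMember(ctx, "myzset", 2, true).Result()
+//	// vals is e.g. []string{"member1", "score1", "member2", "score2"}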
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "zrandmember", key, count)
+	if withScores {
+		args = append(args, "withscores")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+	args[len(keys)+2] = "withscores"
+
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffStore redis-server version >= 6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 0, 3+len(keys))
+	args = append(args, "zdiffstore", destination, len(keys))
+	for _, key := range keys {
+		args = append(args, key)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgrewriteaof")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientKillByFilter uses the new-style syntax, while ClientKill uses the old one
+//
+// CLIENT KILL