summaryrefslogtreecommitdiffstats
path: root/dependencies/pkg/mod/github.com
diff options
context:
space:
mode:
Diffstat (limited to 'dependencies/pkg/mod/github.com')
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml32
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s215
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml56
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/README.md (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/README.md)31
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/bench_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/bench_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/.gitignore (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/dynamic_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/dynamic_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/plugin.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/plugin.go)3
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.mod (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.mod)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.sum (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.sum)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh10
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash.go)47
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s209
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s183
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_asm.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.go)2
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_other.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_other.go)22
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_safe.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_safe.go)1
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe.go)3
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe_test.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/.gitignore (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/xxhsum.go (renamed from dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/xxhsum.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml11
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml19
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml17
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore3
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md177
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile35
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md175
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go3478
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go3475
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod20
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum108
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go36
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go332
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go72
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go93
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go12
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go21
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go67
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json8
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go773
-rw-r--r--dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf10
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go25
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod3
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go22
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/CONTRIBUTING.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/CONTRIBUTING.md)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/ISSUE_TEMPLATE.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/ISSUE_TEMPLATE.md)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/PULL_REQUEST_TEMPLATE.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/PULL_REQUEST_TEMPLATE.md)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/codeql.yml (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/codeql.yml)8
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/test.yml (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/test.yml)28
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.gitignore (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/AUTHORS (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/AUTHORS)16
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/CHANGELOG.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/CHANGELOG.md)48
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/LICENSE (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/README.md (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/README.md)78
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_go118.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_go118.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth.go)63
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth_test.go)51
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/benchmark_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/benchmark_test.go)64
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/buffer.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/buffer.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/collations.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/collations.go)2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_dummy.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_dummy.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection.go)100
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector.go)67
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/const.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/const.go)13
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver.go)27
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver_test.go)917
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn.go)128
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go47
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn_test.go)113
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors.go)16
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/fields.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fields.go)70
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod5
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/infile.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/infile.go)12
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime.go)4
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets.go)209
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets_test.go)61
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go50
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/rows.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/rows.go)13
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement.go)23
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/transaction.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/transaction.go)0
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils.go)13
-rw-r--r--dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils_test.go (renamed from dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils_test.go)89
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.codecov.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.codecov.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/FUNDING.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/FUNDING.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md29
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md20
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/workflows/go.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/workflows/go.yml)32
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/CHANGELOG.md (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/CHANGELOG.md)23
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/LICENSE (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/Makefile (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/README.md (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/README.md)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast.go)4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/cmd/ycat/ycat.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/cmd/ycat/ycat.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode_test.go)36
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode.go)4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode_test.go)39
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/error.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/error.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.mod (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.mod)2
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.sum (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.sum)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/internal/errors/error.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/internal/errors/error.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/option.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/option.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/context.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/context.go)7
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser.go)41
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser_test.go)124
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/cr.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/cr.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/crlf.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/crlf.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/lf.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/lf.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path.go)26
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path_test.go)7
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/context.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/context.go)13
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/scanner.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/scanner.go)5
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/stdlib_quote.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/stdlib_quote.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/struct.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/struct.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/testdata/anchor.yml (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/testdata/anchor.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token.go)4
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token_test.go)3
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml.go)62
-rw-r--r--dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml_test.go (renamed from dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml_test.go)89
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml9
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md10
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS6
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml2
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml26
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml20
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md41
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md26
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTORS)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/README.md)10
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/dce.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/doc.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/go.mod)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/hash.go)6
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/json_test.go)51
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/marshal.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_js.go)2
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_net.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/seq_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql_test.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/time.go)21
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/util.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid.go)89
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid_test.go)228
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version1.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go (renamed from dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version4.go)0
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go56
-rw-r--r--dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go104
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.codecov.yml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.codecov.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/FUNDING.yml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/FUNDING.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/cifuzz.yaml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/cifuzz.yaml)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/docker.yaml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/docker.yaml)3
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/go.yaml (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/go.yaml)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.gitignore (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.gitignore)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/LICENSE (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/LICENSE)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/README.md (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/README.md)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/Makefile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_func/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_func/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/fuzz/fuzz_openexec.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/fuzz/fuzz_openexec.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/hook/hook.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/hook/hook.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go81
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/limit/limit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/limit/limit.go)6
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/Makefile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/extension.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/extension.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/sqlite3_mod_regexp.c (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/sqlite3_mod_regexp.c)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/Makefile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/Makefile)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/extension.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/extension.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/picojson.h (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/picojson.h)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/sqlite3_mod_vtable.cc (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/sqlite3_mod_vtable.cc)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/Dockerfile (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/Dockerfile)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/simple.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/simple.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/trace/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/trace/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/vtable.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/vtable.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/main.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/main.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/vtable.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/vtable.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback.go)16
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/convert.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/convert.go)10
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/doc.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/doc.go)105
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.mod (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.mod)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.sum)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.c (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.c)19269
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.h (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.h)435
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3.go)195
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_context.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_context.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt.go)24
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go113_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go113_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18.go)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_libsqlite3.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_libsqlite3.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_omit.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_allow_uri_authority.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_allow_uri_authority.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_app_armor.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_app_armor.go)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_foreign_keys.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_foreign_keys.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts3_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts3_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts5.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts5.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_icu.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_icu.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_introspect.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_introspect.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_os_trace.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_os_trace.go)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook.go)9
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook_test.go)11
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_omit.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete_fast.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete_fast.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_omit.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_stat4.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_stat4.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.c (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.c)0
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.go)4
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth.go)36
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_omit.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_omit.go)36
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_test.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_full.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_full.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_incr.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_incr.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable.go)13
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable_test.go)26
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_other.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_other.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_solaris.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_solaris.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_test.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_test.go)74
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_trace.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_trace.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_type.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_type.go)2
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_usleep_windows.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_usleep_windows.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_windows.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_windows.go)1
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3ext.h (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3ext.h)10
-rw-r--r--dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/static_mock.go (renamed from dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/static_mock.go)5
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS1
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/FUNDING.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/bug_report.md (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/config.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/dependabot.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml29
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt60
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/build.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml)15
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml41
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml26
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml24
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml14
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml25
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml59
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.golangci.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.prettierrc.yml (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md124
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md101
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/LICENSE (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile44
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md274
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/RELEASING.md (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go35
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_decode_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go)35
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go)110
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go163
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go98
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go192
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go5483
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go718
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go)2312
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doc.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md22
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/error.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go)18
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_instrumentation_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go)44
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go)91
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/export_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go)31
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/fuzz/fuzz.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go149
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go114
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go384
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go155
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod10
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum8
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go174
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go42
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/arg.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go)4
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go)50
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/structmap.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go)36
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go)2
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/log.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/once.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go)7
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/bench_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go)20
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go)18
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go49
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go9
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go47
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/export_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go)5
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go123
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go)227
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_single.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_sticky.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go)194
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/proto_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go)4
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go552
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go100
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go)7
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go)42
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go154
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/rand/rand.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go)22
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/safe.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go)1
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/strconv.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go5
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/unsafe.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go)1
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go53
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go354
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go)13
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go)6
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go599
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go660
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go289
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/main_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go)196
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go48
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go)271
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go)41
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go)579
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_commands.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go)12
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go)499
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go)74
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go)14
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pool_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go)51
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go1429
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go733
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go)99
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go76
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go)143
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/race_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go)85
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go852
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go)248
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/result.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go)22
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go)481
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go)287
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/script.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go)23
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go215
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/bump_deps.sh (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/release.sh (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh)7
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/tag.sh (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh)0
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go)368
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go)110
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go217
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go772
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go438
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go303
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go922
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go940
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go)56
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go)11
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go)124
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal_test.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go)9
-rw-r--r--dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/version.go (renamed from dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go)2
397 files changed, 41578 insertions, 18134 deletions
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml
deleted file mode 100644
index 606cf83..0000000
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/.github/workflows/test.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: Test
-
-on:
- push:
- branches: [main]
- pull_request:
-
-jobs:
- test:
- strategy:
- matrix:
- go-version: [1.16.x, 1.17.x]
- os: [ubuntu-latest, macos-latest, windows-latest]
- runs-on: ${{ matrix.os }}
-
- steps:
- - name: Install go
- uses: actions/setup-go@v2
- with:
- go-version: ${{ matrix.go-version }}
-
- - name: Check out code
- uses: actions/checkout@v2
-
- - name: Test
- run: go test -count 1 -bench . -benchtime 1x ./...
-
- - name: Test with -tags purego
- run: go test -count 1 -bench . -benchtime 1x -tags purego ./...
-
-# TODO: Test on other architectures. Unfortunately only amd64 is supported
-# by GH Actions. We could use QEMU in the meantime.
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s
deleted file mode 100644
index be8db5b..0000000
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.s
+++ /dev/null
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
-
- // Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
-
- CMPQ SI, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
-
- CMPQ SI, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
-
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- // Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
-
- RET
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml
new file mode 100644
index 0000000..542b91f
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/.github/workflows/test.yml
@@ -0,0 +1,56 @@
+name: Test
+
+on:
+ push:
+ branches: [main]
+ pull_request:
+
+jobs:
+ test:
+ strategy:
+ matrix:
+ go-version: [1.16.x, 1.17.x]
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: Install go
+ uses: WillAbides/setup-go-faster@v1.5.0
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Test
+ run: go test -count 1 -bench . -benchtime 1x ./...
+
+ - name: Test with -tags purego
+ run: go test -count 1 -bench . -benchtime 1x -tags purego ./...
+
+ test-qemu:
+ needs: test
+ strategy:
+ matrix:
+ go-version: [1.16.x, 1.17.x]
+ arch: [386, arm, arm64]
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Install go
+ uses: WillAbides/setup-go-faster@v1.5.0
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Install QEMU
+ uses: docker/setup-qemu-action@v1
+
+ - name: Check out code
+ uses: actions/checkout@v2
+
+ - name: Run test via qemu/binfmt
+ # TODO: Run the dynamic linking tests as well. That is a little more
+ # involved.
+ run: go test -v -count 1 -bench . -benchtime 1x
+ env:
+ GOARCH: ${{ matrix.arch }}
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt
index 24b5306..24b5306 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/README.md b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/README.md
index 792b4a6..8bf0e5b 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/README.md
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/README.md
@@ -3,8 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/bench_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/bench_test.go
index 4dfeb91..1633823 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/bench_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/bench_test.go
@@ -10,6 +10,7 @@ var benchmarks = []struct {
n int64
}{
{"4B", 4},
+ {"16B", 16},
{"100B", 100},
{"4KB", 4e3},
{"10MB", 10e6},
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/.gitignore b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/.gitignore
index 8a84f19..8a84f19 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/.gitignore
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/.gitignore
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/dynamic_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/dynamic_test.go
index c86bc93..4766b58 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/dynamic_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/dynamic_test.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin
// +build linux darwin
package main
@@ -5,6 +6,7 @@ package main
import (
"bytes"
"log"
+ "os"
"os/exec"
"plugin"
"testing"
@@ -25,7 +27,7 @@ func TestMain(m *testing.M) {
if err := cmd.Run(); err != nil {
log.Fatalf("Error building plugin: %s\nOutput:\n%s", err, out.String())
}
- m.Run()
+ os.Exit(m.Run())
}
func TestDynamic(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/plugin.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/plugin.go
index 319ed71..fcf3e73 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/dynamic/plugin.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/dynamic/plugin.go
@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore
package main
@@ -12,7 +13,7 @@ import (
const (
in = "Call me Ishmael. Some years ago--never mind how long precisely-"
- want = 0x02a2e85470d6fd96
+ want = uint64(0x02a2e85470d6fd96)
)
func TestSum(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.mod b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.mod
index 49f6760..49f6760 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.mod
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.mod
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.sum b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.sum
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/go.sum
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/go.sum
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh
new file mode 100644
index 0000000..94b9c44
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash.go
index 15c835d..a9e0d45 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash.go
@@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s
new file mode 100644
index 0000000..3e8b132
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_amd64.s
@@ -0,0 +1,209 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ // Load fixed primes.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
+
+ // Load slice.
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, end
+
+ // Check whether we have at least one block.
+ CMPQ n, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·primes+32(SB), h
+
+afterBlocks:
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
+ JGE finalize
+
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
+
+ CMPQ p, end
+ JL loop1
+
+finalize:
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+
+ // Load slice.
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
+
+ // Load vN from d.
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+ blockLoop()
+
+ // Copy vN back to d.
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
+
+ RET
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s
new file mode 100644
index 0000000..7e3145a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_asm.go
index ad14b80..9216e0a 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_amd64.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_asm.go
@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_other.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_other.go
index 4a5a821..26df13b 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_other.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_other.go
@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
package xxhash
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_safe.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_safe.go
index fc9bea7..e86f1b5 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_safe.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_test.go
index 6330f19..6330f19 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_test.go
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe.go
index 376e0ca..1c1638f 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
// This file encapsulates usage of unsafe.
@@ -11,7 +12,7 @@ import (
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe_test.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe_test.go
index 6598267..6d6f93c 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhash_unsafe_test.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhash_unsafe_test.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
package xxhash
@@ -25,7 +26,7 @@ func TestStringAllocs(t *testing.T) {
})
}
-// This test is inspired by the Go runtime tests in https://golang.org/cl/57410.
+// This test is inspired by the Go runtime tests in https://go.dev/cl/57410.
// It asserts that certain important functions may be inlined.
func TestInlining(t *testing.T) {
funcs := map[string]struct{}{
@@ -33,8 +34,6 @@ func TestInlining(t *testing.T) {
"(*Digest).WriteString": {},
}
- // TODO: it would be better to use the go binary that is running
- // 'go test' (if we are running under 'go test').
cmd := exec.Command("go", "test", "-gcflags=-m", "-run", "xxxx")
out, err := cmd.CombinedOutput()
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/.gitignore b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/.gitignore
index 2c88f1d..2c88f1d 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/.gitignore
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/.gitignore
diff --git a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/xxhsum.go b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/xxhsum.go
index 9b1d035..9b1d035 100644
--- a/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.1.2/xxhsum/xxhsum.go
+++ b/dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.2.0/xxhsum/xxhsum.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml
deleted file mode 100644
index 5fcfeae..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/commitlint.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: Lint Commit Messages
-on: [pull_request]
-
-jobs:
- commitlint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
- - uses: wagoid/commitlint-github-action@v4
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml
deleted file mode 100644
index 28c16c5..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/golangci-lint.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: golangci-lint
-
-on:
- push:
- tags:
- - v*
- branches:
- - master
- - main
- pull_request:
-
-jobs:
- golangci:
- name: lint
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: golangci-lint
- uses: golangci/golangci-lint-action@v2
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml
deleted file mode 100644
index 685693a..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/release.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Releases
-
-on:
- push:
- tags:
- - 'v*'
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: ncipollo/release-action@v1
- with:
- body:
- Please refer to
- [CHANGELOG.md](https://github.com/go-redis/redis/blob/master/CHANGELOG.md) for details
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore
deleted file mode 100644
index b975a7b..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.rdb
-testdata/*/
-.idea/
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md
deleted file mode 100644
index 195e519..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/CHANGELOG.md
+++ /dev/null
@@ -1,177 +0,0 @@
-## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
-
-
-### Bug Fixes
-
-* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
-* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
-* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
-* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
-* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
-* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
-* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
-* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
-* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
-
-
-### Features
-
-* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
-* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
-* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
-* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
-* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
-* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
-* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
-
-
-
-## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
-
-
-### Features
-
-* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
-* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
-* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
-
-
-
-## v8.11
-
-- Remove OpenTelemetry metrics.
-- Supports more redis commands and options.
-
-## v8.10
-
-- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
- single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
- decision:
-
- - Traces become smaller and less noisy.
- - It may be costly to process those 3 extra spans for each query.
- - go-redis no longer depends on OpenTelemetry.
-
- Eventually we hope to replace the information that we no longer collect with OpenTelemetry
- Metrics.
-
-## v8.9
-
-- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
- `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
-
-## v8.8
-
-- To make updating easier, extra modules now have the same version as go-redis does. That means that
- you need to update your imports:
-
-```
-github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
-github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
-```
-
-## v8.5
-
-- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
- struct:
-
-```go
-err := rdb.HGetAll(ctx, "hash").Scan(&data)
-
-err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
-```
-
-- Please check [redismock](https://github.com/go-redis/redismock) by
- [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
-
-## v8
-
-- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
- using `context.Context` yet, the simplest option is to define global package variable
- `var ctx = context.TODO()` and use it when `ctx` is required.
-
-- Full support for `context.Context` canceling.
-
-- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
-
-- Added `redisext.OpenTemetryHook` that adds
- [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
-
-- Redis slow log support.
-
-- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
- existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
-
-```go
-import "github.com/golang/groupcache/consistenthash"
-
-ring := redis.NewRing(&redis.RingOptions{
- NewConsistentHash: func() {
- return consistenthash.New(100, crc32.ChecksumIEEE)
- },
-})
-```
-
-- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
-- `Options.MaxRetries` default value is changed from 0 to 3.
-
-- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
-
-## v7.3
-
-- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection
- URL contains username.
-
-## v7.2
-
-- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
-
-## v7.1
-
-- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
- interface.
-
-## v7
-
-- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
- transactional pipeline.
-- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
-- WithContext now can not be used to create a shallow copy of the client.
-- New methods ProcessContext, DoContext, and ExecContext.
-- Client respects Context.Deadline when setting net.Conn deadline.
-- Client listens on Context.Done while waiting for a connection from the pool and returns an error
- when context context is cancelled.
-- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
- detecting reconnections.
-- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
- the time.
-- `SetLimiter` is removed and added `Options.Limiter` instead.
-- `HMSet` is deprecated as of Redis v4.
-
-## v6.15
-
-- Cluster and Ring pipelines process commands for each node in its own goroutine.
-
-## 6.14
-
-- Added Options.MinIdleConns.
-- Added Options.MaxConnAge.
-- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
-- Add Client.Do to simplify creating custom commands.
-- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
-- Lower memory usage.
-
-## v6.13
-
-- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
- `HashReplicas = 1000` for better keys distribution between shards.
-- Cluster client was optimized to use much less memory when reloading cluster state.
-- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout
- occurres. In most cases it is recommended to use PubSub.Channel instead.
-- Dialer.KeepAlive is set to 5 minutes by default.
-
-## v6.12
-
-- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis
- Servers that don't have cluster mode enabled. See
- https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
deleted file mode 100644
index a4cfe05..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-test: testdeps
- go test ./...
- go test ./... -short -race
- go test ./... -run=NONE -bench=. -benchmem
- env GOOS=linux GOARCH=386 go test ./...
- go vet
-
-testdeps: testdata/redis/src/redis-server
-
-bench: testdeps
- go test ./... -test.run=NONE -test.bench=. -test.benchmem
-
-.PHONY: all test testdeps bench
-
-testdata/redis:
- mkdir -p $@
- wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
-
-testdata/redis/src/redis-server: testdata/redis
- cd $< && make all
-
-fmt:
- gofmt -w -s ./
- goimports -w -local github.com/go-redis/redis ./
-
-go_mod_tidy:
- go get -u && go mod tidy
- set -e; for dir in $(PACKAGE_DIRS); do \
- echo "go mod tidy in $${dir}"; \
- (cd "$${dir}" && \
- go get -u && \
- go mod tidy); \
- done
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md
deleted file mode 100644
index f3b6a01..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# Redis client for Go
-
-![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
-
-go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
-Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
-OpenTelemetry and ClickHouse. Give it a star as well!
-
-## Resources
-
-- [Discussions](https://github.com/go-redis/redis/discussions)
-- [Documentation](https://redis.uptrace.dev)
-- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
-- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
-
-Other projects you may like:
-
-- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
-- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
-
-## Ecosystem
-
-- [Redis Mock](https://github.com/go-redis/redismock)
-- [Distributed Locks](https://github.com/bsm/redislock)
-- [Redis Cache](https://github.com/go-redis/cache)
-- [Rate limiting](https://github.com/go-redis/redis_rate)
-
-## Features
-
-- Redis 3 commands except QUIT, MONITOR, and SYNC.
-- Automatic connection pooling with
- [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
-- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
-- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
- [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
-- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
-- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
-- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
-- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
- without using cluster mode and Redis Sentinel.
-- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
-
-## Installation
-
-go-redis supports 2 last Go versions and requires a Go version with
-[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
-module:
-
-```shell
-go mod init github.com/my/repo
-```
-
-And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
-
-```shell
-go get github.com/go-redis/redis/v8
-```
-
-## Quickstart
-
-```go
-import (
- "context"
- "github.com/go-redis/redis/v8"
- "fmt"
-)
-
-var ctx = context.Background()
-
-func ExampleClient() {
- rdb := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
-
- err := rdb.Set(ctx, "key", "value", 0).Err()
- if err != nil {
- panic(err)
- }
-
- val, err := rdb.Get(ctx, "key").Result()
- if err != nil {
- panic(err)
- }
- fmt.Println("key", val)
-
- val2, err := rdb.Get(ctx, "key2").Result()
- if err == redis.Nil {
- fmt.Println("key2 does not exist")
- } else if err != nil {
- panic(err)
- } else {
- fmt.Println("key2", val2)
- }
- // Output: key value
- // key2 does not exist
-}
-```
-
-## Look and feel
-
-Some corner cases:
-
-```go
-// SET key value EX 10 NX
-set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
-
-// SET key value keepttl NX
-set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
-
-// SORT list LIMIT 0 2 ASC
-vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
-
-// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
-vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
-}).Result()
-
-// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
-vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
- Keys: []string{"zset1", "zset2"},
- Weights: []int64{2, 3}
-}).Result()
-
-// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
-vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
-
-// custom command
-res, err := rdb.Do(ctx, "set", "key", "value").Result()
-```
-
-## Run the test
-
-go-redis will start a redis-server and run the test cases.
-
-The paths of redis-server bin file and redis config file are defined in `main_test.go`:
-
-```
-var (
- redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
- redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
-)
-```
-
-For local testing, you can change the variables to refer to your local files, or create a soft link
-to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
-
-```
-ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
-cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
-```
-
-Lastly, run:
-
-```
-go test
-```
-
-## Contributors
-
-Thanks to all the people who already contributed!
-
-<a href="https://github.com/go-redis/redis/graphs/contributors">
- <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
-</a>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go
deleted file mode 100644
index 4bb12a8..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command.go
+++ /dev/null
@@ -1,3478 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hscan"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type Cmder interface {
- Name() string
- FullName() string
- Args() []interface{}
- String() string
- stringArg(int) string
- firstKeyPos() int8
- SetFirstKeyPos(int8)
-
- readTimeout() *time.Duration
- readReply(rd *proto.Reader) error
-
- SetErr(error)
- Err() error
-}
-
-func setCmdsErr(cmds []Cmder, e error) {
- for _, cmd := range cmds {
- if cmd.Err() == nil {
- cmd.SetErr(e)
- }
- }
-}
-
-func cmdsFirstErr(cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmds(wr *proto.Writer, cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := writeCmd(wr, cmd); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmd(wr *proto.Writer, cmd Cmder) error {
- return wr.WriteArgs(cmd.Args())
-}
-
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
- if pos := cmd.firstKeyPos(); pos != 0 {
- return int(pos)
- }
-
- switch cmd.Name() {
- case "eval", "evalsha":
- if cmd.stringArg(2) != "0" {
- return 3
- }
-
- return 0
- case "publish":
- return 1
- case "memory":
- // https://github.com/redis/redis/issues/7493
- if cmd.stringArg(1) == "usage" {
- return 2
- }
- }
-
- if info != nil {
- return int(info.FirstKeyPos)
- }
- return 0
-}
-
-func cmdString(cmd Cmder, val interface{}) string {
- b := make([]byte, 0, 64)
-
- for i, arg := range cmd.Args() {
- if i > 0 {
- b = append(b, ' ')
- }
- b = internal.AppendArg(b, arg)
- }
-
- if err := cmd.Err(); err != nil {
- b = append(b, ": "...)
- b = append(b, err.Error()...)
- } else if val != nil {
- b = append(b, ": "...)
- b = internal.AppendArg(b, val)
- }
-
- return internal.String(b)
-}
-
-//------------------------------------------------------------------------------
-
-type baseCmd struct {
- ctx context.Context
- args []interface{}
- err error
- keyPos int8
-
- _readTimeout *time.Duration
-}
-
-var _ Cmder = (*Cmd)(nil)
-
-func (cmd *baseCmd) Name() string {
- if len(cmd.args) == 0 {
- return ""
- }
- // Cmd name must be lower cased.
- return internal.ToLower(cmd.stringArg(0))
-}
-
-func (cmd *baseCmd) FullName() string {
- switch name := cmd.Name(); name {
- case "cluster", "command":
- if len(cmd.args) == 1 {
- return name
- }
- if s2, ok := cmd.args[1].(string); ok {
- return name + " " + s2
- }
- return name
- default:
- return name
- }
-}
-
-func (cmd *baseCmd) Args() []interface{} {
- return cmd.args
-}
-
-func (cmd *baseCmd) stringArg(pos int) string {
- if pos < 0 || pos >= len(cmd.args) {
- return ""
- }
- arg := cmd.args[pos]
- switch v := arg.(type) {
- case string:
- return v
- default:
- // TODO: consider using appendArg
- return fmt.Sprint(v)
- }
-}
-
-func (cmd *baseCmd) firstKeyPos() int8 {
- return cmd.keyPos
-}
-
-func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
- cmd.keyPos = keyPos
-}
-
-func (cmd *baseCmd) SetErr(e error) {
- cmd.err = e
-}
-
-func (cmd *baseCmd) Err() error {
- return cmd.err
-}
-
-func (cmd *baseCmd) readTimeout() *time.Duration {
- return cmd._readTimeout
-}
-
-func (cmd *baseCmd) setReadTimeout(d time.Duration) {
- cmd._readTimeout = &d
-}
-
-//------------------------------------------------------------------------------
-
-type Cmd struct {
- baseCmd
-
- val interface{}
-}
-
-func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
- return &Cmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *Cmd) SetVal(val interface{}) {
- cmd.val = val
-}
-
-func (cmd *Cmd) Val() interface{} {
- return cmd.val
-}
-
-func (cmd *Cmd) Result() (interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *Cmd) Text() (string, error) {
- if cmd.err != nil {
- return "", cmd.err
- }
- return toString(cmd.val)
-}
-
-func toString(val interface{}) (string, error) {
- switch val := val.(type) {
- case string:
- return val, nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for String", val)
- return "", err
- }
-}
-
-func (cmd *Cmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- switch val := cmd.val.(type) {
- case int64:
- return int(val), nil
- case string:
- return strconv.Atoi(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toInt64(cmd.val)
-}
-
-func toInt64(val interface{}) (int64, error) {
- switch val := val.(type) {
- case int64:
- return val, nil
- case string:
- return strconv.ParseInt(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toUint64(cmd.val)
-}
-
-func toUint64(val interface{}) (uint64, error) {
- switch val := val.(type) {
- case int64:
- return uint64(val), nil
- case string:
- return strconv.ParseUint(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat32(cmd.val)
-}
-
-func toFloat32(val interface{}) (float32, error) {
- switch val := val.(type) {
- case int64:
- return float32(val), nil
- case string:
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat64(cmd.val)
-}
-
-func toFloat64(val interface{}) (float64, error) {
- switch val := val.(type) {
- case int64:
- return float64(val), nil
- case string:
- return strconv.ParseFloat(val, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return toBool(cmd.val)
-}
-
-func toBool(val interface{}) (bool, error) {
- switch val := val.(type) {
- case int64:
- return val != 0, nil
- case string:
- return strconv.ParseBool(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
- return false, err
- }
-}
-
-func (cmd *Cmd) Slice() ([]interface{}, error) {
- if cmd.err != nil {
- return nil, cmd.err
- }
- switch val := cmd.val.(type) {
- case []interface{}:
- return val, nil
- default:
- return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
- }
-}
-
-func (cmd *Cmd) StringSlice() ([]string, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- ss := make([]string, len(slice))
- for i, iface := range slice {
- val, err := toString(iface)
- if err != nil {
- return nil, err
- }
- ss[i] = val
- }
- return ss, nil
-}
-
-func (cmd *Cmd) Int64Slice() ([]int64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]int64, len(slice))
- for i, iface := range slice {
- val, err := toInt64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]uint64, len(slice))
- for i, iface := range slice {
- val, err := toUint64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Float32Slice() ([]float32, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float32, len(slice))
- for i, iface := range slice {
- val, err := toFloat32(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) Float64Slice() ([]float64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float64, len(slice))
- for i, iface := range slice {
- val, err := toFloat64(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) BoolSlice() ([]bool, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- bools := make([]bool, len(slice))
- for i, iface := range slice {
- val, err := toBool(iface)
- if err != nil {
- return nil, err
- }
- bools[i] = val
- }
- return bools, nil
-}
-
-func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadReply(sliceParser)
- return err
-}
-
-// sliceParser implements proto.MultiBulkParse.
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- vals := make([]interface{}, n)
- for i := 0; i < len(vals); i++ {
- v, err := rd.ReadReply(sliceParser)
- if err != nil {
- if err == Nil {
- vals[i] = nil
- continue
- }
- if err, ok := err.(proto.RedisError); ok {
- vals[i] = err
- continue
- }
- return nil, err
- }
- vals[i] = v
- }
- return vals, nil
-}
-
-//------------------------------------------------------------------------------
-
-type SliceCmd struct {
- baseCmd
-
- val []interface{}
-}
-
-var _ Cmder = (*SliceCmd)(nil)
-
-func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
- return &SliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SliceCmd) SetVal(val []interface{}) {
- cmd.val = val
-}
-
-func (cmd *SliceCmd) Val() []interface{} {
- return cmd.val
-}
-
-func (cmd *SliceCmd) Result() ([]interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *SliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *SliceCmd) Scan(dst interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- // Pass the list of keys and values.
- // Skip the first two args for: HMGET key
- var args []interface{}
- if cmd.args[0] == "hmget" {
- args = cmd.args[2:]
- } else {
- // Otherwise, it's: MGET field field ...
- args = cmd.args[1:]
- }
-
- return hscan.Scan(dst, args, cmd.val)
-}
-
-func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(sliceParser)
- if err != nil {
- return err
- }
- cmd.val = v.([]interface{})
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StatusCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StatusCmd)(nil)
-
-func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
- return &StatusCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StatusCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StatusCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StatusCmd) Result() (string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StatusCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntCmd struct {
- baseCmd
-
- val int64
-}
-
-var _ Cmder = (*IntCmd)(nil)
-
-func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
- return &IntCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntCmd) SetVal(val int64) {
- cmd.val = val
-}
-
-func (cmd *IntCmd) Val() int64 {
- return cmd.val
-}
-
-func (cmd *IntCmd) Result() (int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntCmd) Uint64() (uint64, error) {
- return uint64(cmd.val), cmd.err
-}
-
-func (cmd *IntCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadIntReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntSliceCmd struct {
- baseCmd
-
- val []int64
-}
-
-var _ Cmder = (*IntSliceCmd)(nil)
-
-func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
- return &IntSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntSliceCmd) SetVal(val []int64) {
- cmd.val = val
-}
-
-func (cmd *IntSliceCmd) Val() []int64 {
- return cmd.val
-}
-
-func (cmd *IntSliceCmd) Result() ([]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]int64, n)
- for i := 0; i < len(cmd.val); i++ {
- num, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = num
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type DurationCmd struct {
- baseCmd
-
- val time.Duration
- precision time.Duration
-}
-
-var _ Cmder = (*DurationCmd)(nil)
-
-func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
- return &DurationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- precision: precision,
- }
-}
-
-func (cmd *DurationCmd) SetVal(val time.Duration) {
- cmd.val = val
-}
-
-func (cmd *DurationCmd) Val() time.Duration {
- return cmd.val
-}
-
-func (cmd *DurationCmd) Result() (time.Duration, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *DurationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadIntReply()
- if err != nil {
- return err
- }
- switch n {
- // -2 if the key does not exist
- // -1 if the key exists but has no associated expire
- case -2, -1:
- cmd.val = time.Duration(n)
- default:
- cmd.val = time.Duration(n) * cmd.precision
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type TimeCmd struct {
- baseCmd
-
- val time.Time
-}
-
-var _ Cmder = (*TimeCmd)(nil)
-
-func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
- return &TimeCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *TimeCmd) SetVal(val time.Time) {
- cmd.val = val
-}
-
-func (cmd *TimeCmd) Val() time.Time {
- return cmd.val
-}
-
-func (cmd *TimeCmd) Result() (time.Time, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *TimeCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d elements, expected 2", n)
- }
-
- sec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- microsec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- cmd.val = time.Unix(sec, microsec*1000)
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolCmd struct {
- baseCmd
-
- val bool
-}
-
-var _ Cmder = (*BoolCmd)(nil)
-
-func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
- return &BoolCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolCmd) SetVal(val bool) {
- cmd.val = val
-}
-
-func (cmd *BoolCmd) Val() bool {
- return cmd.val
-}
-
-func (cmd *BoolCmd) Result() (bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(nil)
- // `SET key value NX` returns nil when key already exists. But
- // `SETNX key value` returns bool (0/1). So convert nil to bool.
- if err == Nil {
- cmd.val = false
- return nil
- }
- if err != nil {
- return err
- }
- switch v := v.(type) {
- case int64:
- cmd.val = v == 1
- return nil
- case string:
- cmd.val = v == "OK"
- return nil
- default:
- return fmt.Errorf("got %T, wanted int64 or string", v)
- }
-}
-
-//------------------------------------------------------------------------------
-
-type StringCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StringCmd)(nil)
-
-func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
- return &StringCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StringCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StringCmd) Result() (string, error) {
- return cmd.Val(), cmd.err
-}
-
-func (cmd *StringCmd) Bytes() ([]byte, error) {
- return util.StringToBytes(cmd.val), cmd.err
-}
-
-func (cmd *StringCmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return strconv.ParseBool(cmd.val)
-}
-
-func (cmd *StringCmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.Atoi(cmd.Val())
-}
-
-func (cmd *StringCmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseInt(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseUint(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- f, err := strconv.ParseFloat(cmd.Val(), 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-func (cmd *StringCmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseFloat(cmd.Val(), 64)
-}
-
-func (cmd *StringCmd) Time() (time.Time, error) {
- if cmd.err != nil {
- return time.Time{}, cmd.err
- }
- return time.Parse(time.RFC3339Nano, cmd.Val())
-}
-
-func (cmd *StringCmd) Scan(val interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
- return proto.Scan([]byte(cmd.val), val)
-}
-
-func (cmd *StringCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatCmd struct {
- baseCmd
-
- val float64
-}
-
-var _ Cmder = (*FloatCmd)(nil)
-
-func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
- return &FloatCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatCmd) SetVal(val float64) {
- cmd.val = val
-}
-
-func (cmd *FloatCmd) Val() float64 {
- return cmd.val
-}
-
-func (cmd *FloatCmd) Result() (float64, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *FloatCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadFloatReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatSliceCmd struct {
- baseCmd
-
- val []float64
-}
-
-var _ Cmder = (*FloatSliceCmd)(nil)
-
-func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
- return &FloatSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatSliceCmd) SetVal(val []float64) {
- cmd.val = val
-}
-
-func (cmd *FloatSliceCmd) Val() []float64 {
- return cmd.val
-}
-
-func (cmd *FloatSliceCmd) Result() ([]float64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *FloatSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]float64, n)
- for i := 0; i < len(cmd.val); i++ {
- switch num, err := rd.ReadFloatReply(); {
- case err == Nil:
- cmd.val[i] = 0
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = num
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringSliceCmd struct {
- baseCmd
-
- val []string
-}
-
-var _ Cmder = (*StringSliceCmd)(nil)
-
-func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
- return &StringSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringSliceCmd) SetVal(val []string) {
- cmd.val = val
-}
-
-func (cmd *StringSliceCmd) Val() []string {
- return cmd.val
-}
-
-func (cmd *StringSliceCmd) Result() ([]string, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *StringSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
- return proto.ScanSlice(cmd.Val(), container)
-}
-
-func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]string, n)
- for i := 0; i < len(cmd.val); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.val[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = s
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolSliceCmd struct {
- baseCmd
-
- val []bool
-}
-
-var _ Cmder = (*BoolSliceCmd)(nil)
-
-func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
- return &BoolSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolSliceCmd) SetVal(val []bool) {
- cmd.val = val
-}
-
-func (cmd *BoolSliceCmd) Val() []bool {
- return cmd.val
-}
-
-func (cmd *BoolSliceCmd) Result() ([]bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]bool, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = n == 1
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStringMapCmd struct {
- baseCmd
-
- val map[string]string
-}
-
-var _ Cmder = (*StringStringMapCmd)(nil)
-
-func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
- return &StringStringMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
- cmd.val = val
-}
-
-func (cmd *StringStringMapCmd) Val() map[string]string {
- return cmd.val
-}
-
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStringMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched in the Redis struct fields by the `redis:"field"` tag.
-func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- strct, err := hscan.Struct(dest)
- if err != nil {
- return err
- }
-
- for k, v := range cmd.val {
- if err := strct.Scan(k, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]string, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = value
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringIntMapCmd struct {
- baseCmd
-
- val map[string]int64
-}
-
-var _ Cmder = (*StringIntMapCmd)(nil)
-
-func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
- return &StringIntMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
- cmd.val = val
-}
-
-func (cmd *StringIntMapCmd) Val() map[string]int64 {
- return cmd.val
-}
-
-func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringIntMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]int64, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = n
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStructMapCmd struct {
- baseCmd
-
- val map[string]struct{}
-}
-
-var _ Cmder = (*StringStructMapCmd)(nil)
-
-func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
- return &StringStructMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
- cmd.val = val
-}
-
-func (cmd *StringStructMapCmd) Val() map[string]struct{} {
- return cmd.val
-}
-
-func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStructMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]struct{}, n)
- for i := int64(0); i < n; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- cmd.val[key] = struct{}{}
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XMessage struct {
- ID string
- Values map[string]interface{}
-}
-
-type XMessageSliceCmd struct {
- baseCmd
-
- val []XMessage
-}
-
-var _ Cmder = (*XMessageSliceCmd)(nil)
-
-func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
- return &XMessageSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
- cmd.val = val
-}
-
-func (cmd *XMessageSliceCmd) Val() []XMessage {
- return cmd.val
-}
-
-func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XMessageSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
- var err error
- cmd.val, err = readXMessageSlice(rd)
- return err
-}
-
-func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- msgs := make([]XMessage, n)
- for i := 0; i < n; i++ {
- var err error
- msgs[i], err = readXMessage(rd)
- if err != nil {
- return nil, err
- }
- }
- return msgs, nil
-}
-
-func readXMessage(rd *proto.Reader) (XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return XMessage{}, err
- }
- if n != 2 {
- return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return XMessage{}, err
- }
-
- var values map[string]interface{}
-
- v, err := rd.ReadArrayReply(stringInterfaceMapParser)
- if err != nil {
- if err != proto.Nil {
- return XMessage{}, err
- }
- } else {
- values = v.(map[string]interface{})
- }
-
- return XMessage{
- ID: id,
- Values: values,
- }, nil
-}
-
-// stringInterfaceMapParser implements proto.MultiBulkParse.
-func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]interface{}, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- m[key] = value
- }
- return m, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XStream struct {
- Stream string
- Messages []XMessage
-}
-
-type XStreamSliceCmd struct {
- baseCmd
-
- val []XStream
-}
-
-var _ Cmder = (*XStreamSliceCmd)(nil)
-
-func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
- return &XStreamSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
- cmd.val = val
-}
-
-func (cmd *XStreamSliceCmd) Val() []XStream {
- return cmd.val
-}
-
-func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XStreamSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XStream, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- stream, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- msgs, err := readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = XStream{
- Stream: stream,
- Messages: msgs,
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPending struct {
- Count int64
- Lower string
- Higher string
- Consumers map[string]int64
-}
-
-type XPendingCmd struct {
- baseCmd
- val *XPending
-}
-
-var _ Cmder = (*XPendingCmd)(nil)
-
-func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
- return &XPendingCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingCmd) SetVal(val *XPending) {
- cmd.val = val
-}
-
-func (cmd *XPendingCmd) Val() *XPending {
- return cmd.val
-}
-
-func (cmd *XPendingCmd) Result() (*XPending, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- count, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- lower, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- higher, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = &XPending{
- Count: count,
- Lower: lower,
- Higher: higher,
- }
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- for i := int64(0); i < n; i++ {
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- consumerName, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumerPending, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- if cmd.val.Consumers == nil {
- cmd.val.Consumers = make(map[string]int64)
- }
- cmd.val.Consumers[consumerName] = consumerPending
-
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- if err != nil && err != Nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPendingExt struct {
- ID string
- Consumer string
- Idle time.Duration
- RetryCount int64
-}
-
-type XPendingExtCmd struct {
- baseCmd
- val []XPendingExt
-}
-
-var _ Cmder = (*XPendingExtCmd)(nil)
-
-func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
- return &XPendingExtCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
- cmd.val = val
-}
-
-func (cmd *XPendingExtCmd) Val() []XPendingExt {
- return cmd.val
-}
-
-func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingExtCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XPendingExt, 0, n)
- for i := int64(0); i < n; i++ {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumer, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- idle, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- retryCount, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = append(cmd.val, XPendingExt{
- ID: id,
- Consumer: consumer,
- Idle: time.Duration(idle) * time.Millisecond,
- RetryCount: retryCount,
- })
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimCmd struct {
- baseCmd
-
- start string
- val []XMessage
-}
-
-var _ Cmder = (*XAutoClaimCmd)(nil)
-
-func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
- return &XAutoClaimCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val, err = readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimJustIDCmd struct {
- baseCmd
-
- start string
- val []string
-}
-
-var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
-
-func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
- return &XAutoClaimJustIDCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimJustIDCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- cmd.val = make([]string, nn)
- for i := 0; i < nn; i++ {
- cmd.val[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoConsumersCmd struct {
- baseCmd
- val []XInfoConsumer
-}
-
-type XInfoConsumer struct {
- Name string
- Pending int64
- Idle int64
-}
-
-var _ Cmder = (*XInfoConsumersCmd)(nil)
-
-func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
- return &XInfoConsumersCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "consumers", stream, group},
- },
- }
-}
-
-func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
- cmd.val = val
-}
-
-func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
- return cmd.val
-}
-
-func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoConsumersCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoConsumer, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXConsumerInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
- var consumer XInfoConsumer
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return consumer, err
- }
- if n != 6 {
- return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
- }
-
- for i := 0; i < 3; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- switch key {
- case "name":
- consumer.Name = val
- case "pending":
- consumer.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- case "idle":
- consumer.Idle, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- default:
- return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
- }
- }
-
- return consumer, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoGroupsCmd struct {
- baseCmd
- val []XInfoGroup
-}
-
-type XInfoGroup struct {
- Name string
- Consumers int64
- Pending int64
- LastDeliveredID string
-}
-
-var _ Cmder = (*XInfoGroupsCmd)(nil)
-
-func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
- return &XInfoGroupsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "groups", stream},
- },
- }
-}
-
-func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
- cmd.val = val
-}
-
-func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
- return cmd.val
-}
-
-func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoGroupsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoGroup, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXGroupInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
- var group XInfoGroup
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return group, err
- }
- if n != 8 {
- return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
- }
-
- for i := 0; i < 4; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- switch key {
- case "name":
- group.Name = val
- case "consumers":
- group.Consumers, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "pending":
- group.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "last-delivered-id":
- group.LastDeliveredID = val
- default:
- return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
- }
- }
-
- return group, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamCmd struct {
- baseCmd
- val *XInfoStream
-}
-
-type XInfoStream struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- Groups int64
- LastGeneratedID string
- FirstEntry XMessage
- LastEntry XMessage
-}
-
-var _ Cmder = (*XInfoStreamCmd)(nil)
-
-func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
- return &XInfoStreamCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "stream", stream},
- },
- }
-}
-
-func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamCmd) Val() *XInfoStream {
- return cmd.val
-}
-
-func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(xStreamInfoParser)
- if err != nil {
- return err
- }
- cmd.val = v.(*XInfoStream)
- return nil
-}
-
-func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 14 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 14", n)
- }
- var info XInfoStream
- for i := 0; i < 7; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- switch key {
- case "length":
- info.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- info.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- info.RadixTreeNodes, err = rd.ReadIntReply()
- case "groups":
- info.Groups, err = rd.ReadIntReply()
- case "last-generated-id":
- info.LastGeneratedID, err = rd.ReadString()
- case "first-entry":
- info.FirstEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- case "last-entry":
- info.LastEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return nil, err
- }
- }
- return &info, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamFullCmd struct {
- baseCmd
- val *XInfoStreamFull
-}
-
-type XInfoStreamFull struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- LastGeneratedID string
- Entries []XMessage
- Groups []XInfoStreamGroup
-}
-
-type XInfoStreamGroup struct {
- Name string
- LastDeliveredID string
- PelCount int64
- Pending []XInfoStreamGroupPending
- Consumers []XInfoStreamConsumer
-}
-
-type XInfoStreamGroupPending struct {
- ID string
- Consumer string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-type XInfoStreamConsumer struct {
- Name string
- SeenTime time.Time
- PelCount int64
- Pending []XInfoStreamConsumerPending
-}
-
-type XInfoStreamConsumerPending struct {
- ID string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-var _ Cmder = (*XInfoStreamFullCmd)(nil)
-
-func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
- return &XInfoStreamFullCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
- return cmd.val
-}
-
-func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamFullCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if n != 12 {
- return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 12", n)
- }
-
- cmd.val = &XInfoStreamFull{}
-
- for i := 0; i < 6; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return err
- }
-
- switch key {
- case "length":
- cmd.val.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
- case "last-generated-id":
- cmd.val.LastGeneratedID, err = rd.ReadString()
- case "entries":
- cmd.val.Entries, err = readXMessageSlice(rd)
- case "groups":
- cmd.val.Groups, err = readStreamGroups(rd)
- default:
- return fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- groups := make([]XInfoStreamGroup, 0, n)
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 10 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 10", nn)
- }
-
- group := XInfoStreamGroup{}
-
- for f := 0; f < 5; f++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch key {
- case "name":
- group.Name, err = rd.ReadString()
- case "last-delivered-id":
- group.LastDeliveredID, err = rd.ReadString()
- case "pel-count":
- group.PelCount, err = rd.ReadIntReply()
- case "pending":
- group.Pending, err = readXInfoStreamGroupPending(rd)
- case "consumers":
- group.Consumers, err = readXInfoStreamConsumers(rd)
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- groups = append(groups, group)
- }
-
- return groups, nil
-}
-
-func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- pending := make([]XInfoStreamGroupPending, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 4 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 4", nn)
- }
-
- p := XInfoStreamGroupPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- p.Consumer, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- pending = append(pending, p)
- }
-
- return pending, nil
-}
-
-func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- consumers := make([]XInfoStreamConsumer, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 8 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 8", nn)
- }
-
- c := XInfoStreamConsumer{}
-
- for f := 0; f < 4; f++ {
- cKey, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch cKey {
- case "name":
- c.Name, err = rd.ReadString()
- case "seen-time":
- seen, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
- case "pel-count":
- c.PelCount, err = rd.ReadIntReply()
- case "pending":
- pendingNumber, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
-
- for pn := 0; pn < pendingNumber; pn++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 3 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 3", nn)
- }
-
- p := XInfoStreamConsumerPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- c.Pending = append(c.Pending, p)
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", cKey)
- }
- if err != nil {
- return nil, err
- }
- }
- consumers = append(consumers, c)
- }
-
- return consumers, nil
-}
-
-//------------------------------------------------------------------------------
-
-type ZSliceCmd struct {
- baseCmd
-
- val []Z
-}
-
-var _ Cmder = (*ZSliceCmd)(nil)
-
-func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
- return &ZSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZSliceCmd) SetVal(val []Z) {
- cmd.val = val
-}
-
-func (cmd *ZSliceCmd) Val() []Z {
- return cmd.val
-}
-
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *ZSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]Z, n/2)
- for i := 0; i < len(cmd.val); i++ {
- member, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- score, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = Z{
- Member: member,
- Score: score,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ZWithKeyCmd struct {
- baseCmd
-
- val *ZWithKey
-}
-
-var _ Cmder = (*ZWithKeyCmd)(nil)
-
-func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
- return &ZWithKeyCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
- cmd.val = val
-}
-
-func (cmd *ZWithKeyCmd) Val() *ZWithKey {
- return cmd.val
-}
-
-func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ZWithKeyCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 3 {
- return nil, fmt.Errorf("got %d elements, expected 3", n)
- }
-
- cmd.val = &ZWithKey{}
- var err error
-
- cmd.val.Key, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Member, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Score, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
- baseCmd
-
- page []string
- cursor uint64
-
- process cmdable
-}
-
-var _ Cmder = (*ScanCmd)(nil)
-
-func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
- return &ScanCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- process: process,
- }
-}
-
-func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
- cmd.page = page
- cmd.cursor = cursor
-}
-
-func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
- return cmd.page, cmd.cursor
-}
-
-func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
- return cmd.page, cmd.cursor, cmd.err
-}
-
-func (cmd *ScanCmd) String() string {
- return cmdString(cmd, cmd.page)
-}
-
-func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
- cmd.page, cmd.cursor, err = rd.ReadScanReply()
- return err
-}
-
-// Iterator creates a new ScanIterator.
-func (cmd *ScanCmd) Iterator() *ScanIterator {
- return &ScanIterator{
- cmd: cmd,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type ClusterNode struct {
- ID string
- Addr string
-}
-
-type ClusterSlot struct {
- Start int
- End int
- Nodes []ClusterNode
-}
-
-type ClusterSlotsCmd struct {
- baseCmd
-
- val []ClusterSlot
-}
-
-var _ Cmder = (*ClusterSlotsCmd)(nil)
-
-func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
- return &ClusterSlotsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
- cmd.val = val
-}
-
-func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
- return cmd.val
-}
-
-func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ClusterSlotsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]ClusterSlot, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 2 {
- err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
- return nil, err
- }
-
- start, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- end, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ClusterNode, n-2)
- for j := 0; j < len(nodes); j++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 && n != 3 {
- err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
- return nil, err
- }
-
- ip, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- port, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nodes[j].Addr = net.JoinHostPort(ip, port)
-
- if n == 3 {
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- nodes[j].ID = id
- }
- }
-
- cmd.val[i] = ClusterSlot{
- Start: int(start),
- End: int(end),
- Nodes: nodes,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-// GeoLocation is used with GeoAdd to add geospatial location.
-type GeoLocation struct {
- Name string
- Longitude, Latitude, Dist float64
- GeoHash int64
-}
-
-// GeoRadiusQuery is used with GeoRadius to query geospatial index.
-type GeoRadiusQuery struct {
- Radius float64
- // Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
- WithGeoHash bool
- Count int
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
- StoreDist string
-}
-
-type GeoLocationCmd struct {
- baseCmd
-
- q *GeoRadiusQuery
- locations []GeoLocation
-}
-
-var _ Cmder = (*GeoLocationCmd)(nil)
-
-func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
- return &GeoLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: geoLocationArgs(q, args...),
- },
- q: q,
- }
-}
-
-func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
- args = append(args, q.Radius)
- if q.Unit != "" {
- args = append(args, q.Unit)
- } else {
- args = append(args, "km")
- }
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithGeoHash {
- args = append(args, "withhash")
- }
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- }
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
- if q.Store != "" {
- args = append(args, "store")
- args = append(args, q.Store)
- }
- if q.StoreDist != "" {
- args = append(args, "storedist")
- args = append(args, q.StoreDist)
- }
- return args
-}
-
-func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
- cmd.locations = locations
-}
-
-func (cmd *GeoLocationCmd) Val() []GeoLocation {
- return cmd.locations
-}
-
-func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.locations, cmd.err
-}
-
-func (cmd *GeoLocationCmd) String() string {
- return cmdString(cmd, cmd.locations)
-}
-
-func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if err != nil {
- return err
- }
- cmd.locations = v.([]GeoLocation)
- return nil
-}
-
-func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- locs := make([]GeoLocation, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(newGeoLocationParser(q))
- if err != nil {
- return nil, err
- }
- switch vv := v.(type) {
- case string:
- locs = append(locs, GeoLocation{
- Name: vv,
- })
- case *GeoLocation:
- // TODO: avoid copying
- locs = append(locs, *vv)
- default:
- return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
- }
- }
- return locs, nil
- }
-}
-
-func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- var loc GeoLocation
- var err error
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- if q.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithGeoHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithCoord {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 {
- return nil, fmt.Errorf("got %d coordinates, expected 2", n)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
-
- return &loc, nil
- }
-}
-
-//------------------------------------------------------------------------------
-
-// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
-type GeoSearchQuery struct {
- Member string
-
- // Latitude and Longitude when using FromLonLat option.
- Longitude float64
- Latitude float64
-
- // Distance and unit when using ByRadius option.
- // Can use m, km, ft, or mi. Default is km.
- Radius float64
- RadiusUnit string
-
- // Height, width and unit when using ByBox option.
- // Can be m, km, ft, or mi. Default is km.
- BoxWidth float64
- BoxHeight float64
- BoxUnit string
-
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Count int
- CountAny bool
-}
-
-type GeoSearchLocationQuery struct {
- GeoSearchQuery
-
- WithCoord bool
- WithDist bool
- WithHash bool
-}
-
-type GeoSearchStoreQuery struct {
- GeoSearchQuery
-
- // When using the StoreDist option, the command stores the items in a
- // sorted set populated with their distance from the center of the circle or box,
- // as a floating-point number, in the same unit specified for that shape.
- StoreDist bool
-}
-
-func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
- args = geoSearchArgs(&q.GeoSearchQuery, args)
-
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithHash {
- args = append(args, "withhash")
- }
-
- return args
-}
-
-func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
- if q.Member != "" {
- args = append(args, "frommember", q.Member)
- } else {
- args = append(args, "fromlonlat", q.Longitude, q.Latitude)
- }
-
- if q.Radius > 0 {
- if q.RadiusUnit == "" {
- q.RadiusUnit = "km"
- }
- args = append(args, "byradius", q.Radius, q.RadiusUnit)
- } else {
- if q.BoxUnit == "" {
- q.BoxUnit = "km"
- }
- args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
- }
-
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
-
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- if q.CountAny {
- args = append(args, "any")
- }
- }
-
- return args
-}
-
-type GeoSearchLocationCmd struct {
- baseCmd
-
- opt *GeoSearchLocationQuery
- val []GeoLocation
-}
-
-var _ Cmder = (*GeoSearchLocationCmd)(nil)
-
-func NewGeoSearchLocationCmd(
- ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
-) *GeoSearchLocationCmd {
- return &GeoSearchLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- opt: opt,
- }
-}
-
-func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
- cmd.val = val
-}
-
-func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
- return cmd.val
-}
-
-func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *GeoSearchLocationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]GeoLocation, n)
- for i := 0; i < n; i++ {
- _, err = rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- var loc GeoLocation
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return err
- }
- if cmd.opt.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithCoord {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if nn != 2 {
- return fmt.Errorf("got %d coordinates, expected 2", nn)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
-
- cmd.val[i] = loc
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type GeoPos struct {
- Longitude, Latitude float64
-}
-
-type GeoPosCmd struct {
- baseCmd
-
- val []*GeoPos
-}
-
-var _ Cmder = (*GeoPosCmd)(nil)
-
-func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
- return &GeoPosCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
- cmd.val = val
-}
-
-func (cmd *GeoPosCmd) Val() []*GeoPos {
- return cmd.val
-}
-
-func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *GeoPosCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]*GeoPos, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- longitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- latitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = &GeoPos{
- Longitude: longitude,
- Latitude: latitude,
- }
- return nil, nil
- })
- if err != nil {
- if err == Nil {
- cmd.val[i] = nil
- continue
- }
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- ACLFlags []string
- FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
-}
-
-type CommandsInfoCmd struct {
- baseCmd
-
- val map[string]*CommandInfo
-}
-
-var _ Cmder = (*CommandsInfoCmd)(nil)
-
-func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
- return &CommandsInfoCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
- cmd.val = val
-}
-
-func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
- return cmd.val
-}
-
-func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *CommandsInfoCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]*CommandInfo, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(commandInfoParser)
- if err != nil {
- return nil, err
- }
- vv := v.(*CommandInfo)
- cmd.val[vv.Name] = vv
- }
- return nil, nil
- })
- return err
-}
-
-func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- const numArgRedis5 = 6
- const numArgRedis6 = 7
-
- switch n {
- case numArgRedis5, numArgRedis6:
- // continue
- default:
- return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
- }
-
- var cmd CommandInfo
- var err error
-
- cmd.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- arity, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.Arity = int8(arity)
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.Flags = make([]string, n)
- for i := 0; i < len(cmd.Flags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.Flags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.Flags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- firstKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.FirstKeyPos = int8(firstKeyPos)
-
- lastKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.LastKeyPos = int8(lastKeyPos)
-
- stepCount, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.StepCount = int8(stepCount)
-
- for _, flag := range cmd.Flags {
- if flag == "readonly" {
- cmd.ReadOnly = true
- break
- }
- }
-
- if n == numArgRedis5 {
- return &cmd, nil
- }
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.ACLFlags = make([]string, n)
- for i := 0; i < len(cmd.ACLFlags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.ACLFlags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.ACLFlags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- return &cmd, nil
-}
-
-//------------------------------------------------------------------------------
-
-type cmdsInfoCache struct {
- fn func(ctx context.Context) (map[string]*CommandInfo, error)
-
- once internal.Once
- cmds map[string]*CommandInfo
-}
-
-func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
- return &cmdsInfoCache{
- fn: fn,
- }
-}
-
-func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
- err := c.once.Do(func() error {
- cmds, err := c.fn(ctx)
- if err != nil {
- return err
- }
-
- // Extensions have cmd names in upper case. Convert them to lower case.
- for k, v := range cmds {
- lower := internal.ToLower(k)
- if lower != k {
- cmds[lower] = v
- }
- }
-
- c.cmds = cmds
- return nil
- })
- return c.cmds, err
-}
-
-//------------------------------------------------------------------------------
-
-type SlowLog struct {
- ID int64
- Time time.Time
- Duration time.Duration
- Args []string
- // These are also optional fields emitted only by Redis 4.0 or greater:
- // https://redis.io/commands/slowlog#output-format
- ClientAddr string
- ClientName string
-}
-
-type SlowLogCmd struct {
- baseCmd
-
- val []SlowLog
-}
-
-var _ Cmder = (*SlowLogCmd)(nil)
-
-func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
- return &SlowLogCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
- cmd.val = val
-}
-
-func (cmd *SlowLogCmd) Val() []SlowLog {
- return cmd.val
-}
-
-func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *SlowLogCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]SlowLog, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 4 {
- err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
- return nil, err
- }
-
- id, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- createdAt, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- createdAtTime := time.Unix(createdAt, 0)
-
- costs, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- costsDuration := time.Duration(costs) * time.Microsecond
-
- cmdLen, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if cmdLen < 1 {
- err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
- return nil, err
- }
-
- cmdString := make([]string, cmdLen)
- for i := 0; i < cmdLen; i++ {
- cmdString[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- var address, name string
- for i := 4; i < n; i++ {
- str, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- if i == 4 {
- address = str
- } else if i == 5 {
- name = str
- }
- }
-
- cmd.val[i] = SlowLog{
- ID: id,
- Time: createdAtTime,
- Duration: costsDuration,
- Args: cmdString,
- ClientAddr: address,
- ClientName: name,
- }
- }
- return nil, nil
- })
- return err
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go
deleted file mode 100644
index bbfe089..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands.go
+++ /dev/null
@@ -1,3475 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "io"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
-)
-
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-// For example:
-//
-// rdb.Set(ctx, key, value, redis.KeepTTL)
-const KeepTTL = -1
-
-func usePrecise(dur time.Duration) bool {
- return dur < time.Second || dur%time.Second != 0
-}
-
-func formatMs(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Millisecond {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
- dur, time.Millisecond,
- )
- return 1
- }
- return int64(dur / time.Millisecond)
-}
-
-func formatSec(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Second {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1s",
- dur, time.Second,
- )
- return 1
- }
- return int64(dur / time.Second)
-}
-
-func appendArgs(dst, src []interface{}) []interface{} {
- if len(src) == 1 {
- return appendArg(dst, src[0])
- }
-
- dst = append(dst, src...)
- return dst
-}
-
-func appendArg(dst []interface{}, arg interface{}) []interface{} {
- switch arg := arg.(type) {
- case []string:
- for _, s := range arg {
- dst = append(dst, s)
- }
- return dst
- case []interface{}:
- dst = append(dst, arg...)
- return dst
- case map[string]interface{}:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- case map[string]string:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- default:
- return append(dst, arg)
- }
-}
-
-type Cmdable interface {
- Pipeline() Pipeliner
- Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
-
- TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
- TxPipeline() Pipeliner
-
- Command(ctx context.Context) *CommandsInfoCmd
- ClientGetName(ctx context.Context) *StringCmd
- Echo(ctx context.Context, message interface{}) *StringCmd
- Ping(ctx context.Context) *StatusCmd
- Quit(ctx context.Context) *StatusCmd
- Del(ctx context.Context, keys ...string) *IntCmd
- Unlink(ctx context.Context, keys ...string) *IntCmd
- Dump(ctx context.Context, key string) *StringCmd
- Exists(ctx context.Context, keys ...string) *IntCmd
- Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- Keys(ctx context.Context, pattern string) *StringSliceCmd
- Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
- Move(ctx context.Context, key string, db int) *BoolCmd
- ObjectRefCount(ctx context.Context, key string) *IntCmd
- ObjectEncoding(ctx context.Context, key string) *StringCmd
- ObjectIdleTime(ctx context.Context, key string) *DurationCmd
- Persist(ctx context.Context, key string) *BoolCmd
- PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- PTTL(ctx context.Context, key string) *DurationCmd
- RandomKey(ctx context.Context) *StringCmd
- Rename(ctx context.Context, key, newkey string) *StatusCmd
- RenameNX(ctx context.Context, key, newkey string) *BoolCmd
- Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
- SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
- SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
- Touch(ctx context.Context, keys ...string) *IntCmd
- TTL(ctx context.Context, key string) *DurationCmd
- Type(ctx context.Context, key string) *StatusCmd
- Append(ctx context.Context, key, value string) *IntCmd
- Decr(ctx context.Context, key string) *IntCmd
- DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
- Get(ctx context.Context, key string) *StringCmd
- GetRange(ctx context.Context, key string, start, end int64) *StringCmd
- GetSet(ctx context.Context, key string, value interface{}) *StringCmd
- GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
- GetDel(ctx context.Context, key string) *StringCmd
- Incr(ctx context.Context, key string) *IntCmd
- IncrBy(ctx context.Context, key string, value int64) *IntCmd
- IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
- MGet(ctx context.Context, keys ...string) *SliceCmd
- MSet(ctx context.Context, values ...interface{}) *StatusCmd
- MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
- Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
- // TODO: rename to SetEx
- SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
- StrLen(ctx context.Context, key string) *IntCmd
- Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
-
- GetBit(ctx context.Context, key string, offset int64) *IntCmd
- SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
- BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
- BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
- BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
- BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
-
- Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
- ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
- SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
-
- HDel(ctx context.Context, key string, fields ...string) *IntCmd
- HExists(ctx context.Context, key, field string) *BoolCmd
- HGet(ctx context.Context, key, field string) *StringCmd
- HGetAll(ctx context.Context, key string) *StringStringMapCmd
- HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
- HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
- HKeys(ctx context.Context, key string) *StringSliceCmd
- HLen(ctx context.Context, key string) *IntCmd
- HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
- HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
- HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
- HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
- HVals(ctx context.Context, key string) *StringSliceCmd
- HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
-
- BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
- LIndex(ctx context.Context, key string, index int64) *StringCmd
- LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
- LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LLen(ctx context.Context, key string) *IntCmd
- LPop(ctx context.Context, key string) *StringCmd
- LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
- LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
- LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
- LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
- LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
- RPop(ctx context.Context, key string) *StringCmd
- RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- RPopLPush(ctx context.Context, source, destination string) *StringCmd
- RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
- BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
-
- SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
- SCard(ctx context.Context, key string) *IntCmd
- SDiff(ctx context.Context, keys ...string) *StringSliceCmd
- SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SInter(ctx context.Context, keys ...string) *StringSliceCmd
- SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
- SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
- SMembers(ctx context.Context, key string) *StringSliceCmd
- SMembersMap(ctx context.Context, key string) *StringStructMapCmd
- SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
- SPop(ctx context.Context, key string) *StringCmd
- SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRandMember(ctx context.Context, key string) *StringCmd
- SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- SUnion(ctx context.Context, keys ...string) *StringSliceCmd
- SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- XAdd(ctx context.Context, a *XAddArgs) *StringCmd
- XDel(ctx context.Context, stream string, ids ...string) *IntCmd
- XLen(ctx context.Context, stream string) *IntCmd
- XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
- XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
- XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
- XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
- XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
- XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
- XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
- XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
- XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
- XPending(ctx context.Context, stream, group string) *XPendingCmd
- XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
- XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
- XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
- XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
- XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
-
- // TODO: XTrim and XTrimApprox remove in v9.
- XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
- XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
- XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
- XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
- XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
- XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
- XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
-
- BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
- BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
-
- // TODO: remove
- // ZAddCh
- // ZIncr
- // ZAddNXCh
- // ZAddXXCh
- // ZIncrNX
- // ZIncrXX
- // in v9.
- // use ZAddArgs and ZAddArgsIncr.
-
- ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
- ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
- ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
- ZCard(ctx context.Context, key string) *IntCmd
- ZCount(ctx context.Context, key, min, max string) *IntCmd
- ZLexCount(ctx context.Context, key, min, max string) *IntCmd
- ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
- ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
- ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
- ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
- ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
- ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
- ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
- ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
- ZRank(ctx context.Context, key, member string) *IntCmd
- ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
- ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
- ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
- ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRevRank(ctx context.Context, key, member string) *IntCmd
- ZScore(ctx context.Context, key, member string) *FloatCmd
- ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
- ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
- ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
- ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
- ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
- ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
- ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
- PFCount(ctx context.Context, keys ...string) *IntCmd
- PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
-
- BgRewriteAOF(ctx context.Context) *StatusCmd
- BgSave(ctx context.Context) *StatusCmd
- ClientKill(ctx context.Context, ipPort string) *StatusCmd
- ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
- ClientList(ctx context.Context) *StringCmd
- ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
- ClientID(ctx context.Context) *IntCmd
- ConfigGet(ctx context.Context, parameter string) *SliceCmd
- ConfigResetStat(ctx context.Context) *StatusCmd
- ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
- ConfigRewrite(ctx context.Context) *StatusCmd
- DBSize(ctx context.Context) *IntCmd
- FlushAll(ctx context.Context) *StatusCmd
- FlushAllAsync(ctx context.Context) *StatusCmd
- FlushDB(ctx context.Context) *StatusCmd
- FlushDBAsync(ctx context.Context) *StatusCmd
- Info(ctx context.Context, section ...string) *StringCmd
- LastSave(ctx context.Context) *IntCmd
- Save(ctx context.Context) *StatusCmd
- Shutdown(ctx context.Context) *StatusCmd
- ShutdownSave(ctx context.Context) *StatusCmd
- ShutdownNoSave(ctx context.Context) *StatusCmd
- SlaveOf(ctx context.Context, host, port string) *StatusCmd
- Time(ctx context.Context) *TimeCmd
- DebugObject(ctx context.Context, key string) *StringCmd
- ReadOnly(ctx context.Context) *StatusCmd
- ReadWrite(ctx context.Context) *StatusCmd
- MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
-
- Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
- EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
- ScriptFlush(ctx context.Context) *StatusCmd
- ScriptKill(ctx context.Context) *StatusCmd
- ScriptLoad(ctx context.Context, script string) *StringCmd
-
- Publish(ctx context.Context, channel string, message interface{}) *IntCmd
- PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
- PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
- PubSubNumPat(ctx context.Context) *IntCmd
-
- ClusterSlots(ctx context.Context) *ClusterSlotsCmd
- ClusterNodes(ctx context.Context) *StringCmd
- ClusterMeet(ctx context.Context, host, port string) *StatusCmd
- ClusterForget(ctx context.Context, nodeID string) *StatusCmd
- ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
- ClusterResetSoft(ctx context.Context) *StatusCmd
- ClusterResetHard(ctx context.Context) *StatusCmd
- ClusterInfo(ctx context.Context) *StringCmd
- ClusterKeySlot(ctx context.Context, key string) *IntCmd
- ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
- ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
- ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
- ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
- ClusterSaveConfig(ctx context.Context) *StatusCmd
- ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
- ClusterFailover(ctx context.Context) *StatusCmd
- ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
-
- GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
- GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
- GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
- GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
- GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
- GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
- GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
- GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
- GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
-}
-
-type StatefulCmdable interface {
- Cmdable
- Auth(ctx context.Context, password string) *StatusCmd
- AuthACL(ctx context.Context, username, password string) *StatusCmd
- Select(ctx context.Context, index int) *StatusCmd
- SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
- ClientSetName(ctx context.Context, name string) *BoolCmd
-}
-
-var (
- _ Cmdable = (*Client)(nil)
- _ Cmdable = (*Tx)(nil)
- _ Cmdable = (*Ring)(nil)
- _ Cmdable = (*ClusterClient)(nil)
-)
-
-type cmdable func(ctx context.Context, cmd Cmder) error
-
-type statefulCmdable func(ctx context.Context, cmd Cmder) error
-
-//------------------------------------------------------------------------------
-
-func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// AuthACL Perform an AUTH command, using the given user and pass.
-// Should be used to authenticate the current connection with one of the connections defined in the ACL list
-// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
-func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", username, password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
- cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "select", index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientSetName assigns a name to the connection.
-func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "setname", name)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
- cmd := NewCommandsInfoCmd(ctx, "command")
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientGetName returns the name of the connection.
-func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "getname")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "echo", message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Ping(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "ping")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Quit(_ context.Context) *StatusCmd {
- panic("not implemented")
-}
-
-func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "del"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unlink"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "dump", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "exists"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "")
-}
-
-func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "NX")
-}
-
-func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "XX")
-}
-
-func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "GT")
-}
-
-func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "LT")
-}
-
-func (c cmdable) expire(
- ctx context.Context, key string, expiration time.Duration, mode string,
-) *BoolCmd {
- args := make([]interface{}, 3, 4)
- args[0] = "expire"
- args[1] = key
- args[2] = formatSec(ctx, expiration)
- if mode != "" {
- args = append(args, mode)
- }
-
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "keys", pattern)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "migrate",
- host,
- port,
- key,
- db,
- formatMs(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
- cmd := NewBoolCmd(ctx, "move", key, db)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "object", "refcount", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "object", "encoding", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "persist", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(
- ctx,
- "pexpireat",
- key,
- tm.UnixNano()/int64(time.Millisecond),
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "randomkey")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "rename", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- "replace",
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type Sort struct {
- By string
- Offset, Count int64
- Get []string
- Order string
- Alpha bool
-}
-
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
- if sort.By != "" {
- args = append(args, "by", sort.By)
- }
- if sort.Offset != 0 || sort.Count != 0 {
- args = append(args, "limit", sort.Offset, sort.Count)
- }
- for _, get := range sort.Get {
- args = append(args, "get", get)
- }
- if sort.Order != "" {
- args = append(args, sort.Order)
- }
- if sort.Alpha {
- args = append(args, "alpha")
- }
- return args
-}
-
-func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
- args := sort.args(key)
- if store != "" {
- args = append(args, "store", store)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
- cmd := NewSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, len(keys)+1)
- args[0] = "touch"
- for i, key := range keys {
- args[i+1] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "type", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "append", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "decr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
- cmd := NewIntCmd(ctx, "decrby", key, decrement)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
-func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "get", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
- cmd := NewStringCmd(ctx, "getrange", key, start, end)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "getset", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
-// Requires Redis >= 6.2.0.
-func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
- args := make([]interface{}, 0, 4)
- args = append(args, "getex", key)
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == 0 {
- args = append(args, "persist")
- }
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetDel redis-server version >= 6.2.0.
-func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "getdel", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "incr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
- cmd := NewIntCmd(ctx, "incrby", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "mget"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSet is like Set but accepts multiple values:
-// - MSet("key1", "value1", "key2", "value2")
-// - MSet([]string{"key1", "value1", "key2", "value2"})
-// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "mset"
- args = appendArgs(args, values)
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSetNX is like SetNX but accepts multiple values:
-// - MSetNX("key1", "value1", "key2", "value2")
-// - MSetNX([]string{"key1", "value1", "key2", "value2"})
-// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "msetnx"
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Set Redis `SET key value [expiration]` command.
-// Use expiration for `SETEX`-like behavior.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- args := make([]interface{}, 3, 5)
- args[0] = "set"
- args[1] = key
- args[2] = value
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == KeepTTL {
- args = append(args, "keepttl")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetArgs provides arguments for the SetArgs function.
-type SetArgs struct {
- // Mode can be `NX` or `XX` or empty.
- Mode string
-
- // Zero `TTL` or `Expiration` means that the key has no expiration time.
- TTL time.Duration
- ExpireAt time.Time
-
- // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
- Get bool
-
- // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
- // otherwise you will receive an error: (error) ERR syntax error.
- KeepTTL bool
-}
-
-// SetArgs supports all the options that the SET command supports.
-// It is the alternative to the Set function when you want
-// to have more control over the options.
-func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
- args := []interface{}{"set", key, value}
-
- if a.KeepTTL {
- args = append(args, "keepttl")
- }
-
- if !a.ExpireAt.IsZero() {
- args = append(args, "exat", a.ExpireAt.Unix())
- }
- if a.TTL > 0 {
- if usePrecise(a.TTL) {
- args = append(args, "px", formatMs(ctx, a.TTL))
- } else {
- args = append(args, "ex", formatSec(ctx, a.TTL))
- }
- }
-
- if a.Mode != "" {
- args = append(args, a.Mode)
- }
-
- if a.Get {
- args = append(args, "get")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetEX Redis `SETEX key expiration value` command.
-func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetNX Redis `SET key value [expiration] NX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- // Use old `SETNX` to support old Redis versions.
- cmd = NewBoolCmd(ctx, "setnx", key, value)
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetXX Redis `SET key value [expiration] XX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- cmd = NewBoolCmd(ctx, "set", key, value, "xx")
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "setrange", key, offset, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "strlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
- args := []interface{}{"copy", sourceKey, destKey, "DB", db}
- if replace {
- args = append(args, "REPLACE")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
- cmd := NewIntCmd(ctx, "getbit", key, offset)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "setbit",
- key,
- offset,
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type BitCount struct {
- Start, End int64
-}
-
-func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
- args := []interface{}{"bitcount", key}
- if bitCount != nil {
- args = append(
- args,
- bitCount.Start,
- bitCount.End,
- )
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "bitop"
- args[1] = op
- args[2] = destKey
- for i, key := range keys {
- args[3+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "and", destKey, keys...)
-}
-
-func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "or", destKey, keys...)
-}
-
-func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "xor", destKey, keys...)
-}
-
-func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
- return c.bitOp(ctx, "not", destKey, key)
-}
-
-func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
- args := make([]interface{}, 3+len(pos))
- args[0] = "bitpos"
- args[1] = key
- args[2] = bit
- switch len(pos) {
- case 0:
- case 1:
- args[3] = pos[0]
- case 2:
- args[3] = pos[0]
- args[4] = pos[1]
- default:
- panic("too many arguments")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
- a := make([]interface{}, 0, 2+len(args))
- a = append(a, "bitfield")
- a = append(a, key)
- a = append(a, args...)
- cmd := NewIntSliceCmd(ctx, a...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- if keyType != "" {
- args = append(args, "type", keyType)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"sscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"hscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"zscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hdel"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hexists", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
- cmd := NewStringCmd(ctx, "hget", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "hgetall", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
- cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hkeys", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "hlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMGet returns the values for the specified fields in the hash stored at key.
-// It returns an interface{} to distinguish between empty string and nil value.
-func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hmget"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HSet accepts values in following formats:
-// - HSet("myhash", "key1", "value1", "key2", "value2")
-// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
-// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
-//
-// Note that it requires Redis v4 for multiple field/value pairs support.
-func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
-func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hmset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hvals", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HRandField redis-server version >= 6.2.0.
-func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "hrandfield", key, count)
- if withValues {
- args = append(args, "withvalues")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "blpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "brpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(keys)+1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
- cmd := NewStringCmd(
- ctx,
- "brpoplpush",
- source,
- destination,
- formatSec(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
- cmd := NewStringCmd(ctx, "lindex", key, index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "llen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "lpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "lpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type LPosArgs struct {
- Rank, MaxLen int64
-}
-
-func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
- args := []interface{}{"lpos", key, value}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
-
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
- args := []interface{}{"lpos", key, value, "count", count}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
- cmd := NewIntSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(
- ctx,
- "lrange",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "lrem", key, count, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
- cmd := NewStatusCmd(ctx, "lset", key, index, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "ltrim",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "rpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
- cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BLMove(
- ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
-) *StringCmd {
- cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "sadd"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "scard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sdiff"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sdiffstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sinter"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sinterstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "sismember", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
-func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "smismember"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembers Redis `SMEMBERS key` command output as a slice.
-func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembersMap Redis `SMEMBERS key` command output as a map.
-func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
- cmd := NewStringStructMapCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "smove", source, destination, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPop Redis `SPOP key` command.
-func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "spop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPopN Redis `SPOP key count` command.
-func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "spop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMember Redis `SRANDMEMBER key` command.
-func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "srandmember", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMemberN Redis `SRANDMEMBER key count` command.
-func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "srem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sunion"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sunionstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// XAddArgs accepts values in the following formats:
-// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
-// - XAddArgs.Values = []string("key1", "value1", "key2", "value2")
-// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
-//
-// Note that map will not preserve the order of key-value pairs.
-// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used.
-type XAddArgs struct {
- Stream string
- NoMkStream bool
- MaxLen int64 // MAXLEN N
-
- // Deprecated: use MaxLen+Approx, remove in v9.
- MaxLenApprox int64 // MAXLEN ~ N
-
- MinID string
- // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
- Approx bool
- Limit int64
- ID string
- Values interface{}
-}
-
-// XAdd a.Limit has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
- args := make([]interface{}, 0, 11)
- args = append(args, "xadd", a.Stream)
- if a.NoMkStream {
- args = append(args, "nomkstream")
- }
- switch {
- case a.MaxLen > 0:
- if a.Approx {
- args = append(args, "maxlen", "~", a.MaxLen)
- } else {
- args = append(args, "maxlen", a.MaxLen)
- }
- case a.MaxLenApprox > 0:
- // TODO remove in v9.
- args = append(args, "maxlen", "~", a.MaxLenApprox)
- case a.MinID != "":
- if a.Approx {
- args = append(args, "minid", "~", a.MinID)
- } else {
- args = append(args, "minid", a.MinID)
- }
- }
- if a.Limit > 0 {
- args = append(args, "limit", a.Limit)
- }
- if a.ID != "" {
- args = append(args, a.ID)
- } else {
- args = append(args, "*")
- }
- args = appendArg(args, a.Values)
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
- args := []interface{}{"xdel", stream}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
- cmd := NewIntCmd(ctx, "xlen", stream)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadArgs struct {
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
-}
-
-func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 6+len(a.Streams))
- args = append(args, "xread")
-
- keyPos := int8(1)
- if a.Count > 0 {
- args = append(args, "count")
- args = append(args, a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block")
- args = append(args, int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
- return c.XRead(ctx, &XReadArgs{
- Streams: streams,
- Block: -1,
- })
-}
-
-func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadGroupArgs struct {
- Group string
- Consumer string
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
- NoAck bool
-}
-
-func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 10+len(a.Streams))
- args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
-
- keyPos := int8(4)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block", int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- if a.NoAck {
- args = append(args, "noack")
- keyPos++
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
- args := []interface{}{"xack", stream, group}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
- cmd := NewXPendingCmd(ctx, "xpending", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XPendingExtArgs struct {
- Stream string
- Group string
- Idle time.Duration
- Start string
- End string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "xpending", a.Stream, a.Group)
- if a.Idle != 0 {
- args = append(args, "idle", formatMs(ctx, a.Idle))
- }
- args = append(args, a.Start, a.End, a.Count)
- if a.Consumer != "" {
- args = append(args, a.Consumer)
- }
- cmd := NewXPendingExtCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XAutoClaimArgs struct {
- Stream string
- Group string
- MinIdle time.Duration
- Start string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
- args := xAutoClaimArgs(ctx, a)
- cmd := NewXAutoClaimCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
- args := xAutoClaimArgs(ctx, a)
- args = append(args, "justid")
- cmd := NewXAutoClaimJustIDCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
- args := make([]interface{}, 0, 8)
- args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- }
- return args
-}
-
-type XClaimArgs struct {
- Stream string
- Group string
- Consumer string
- MinIdle time.Duration
- Messages []string
-}
-
-func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
- args := xClaimArgs(a)
- cmd := NewXMessageSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
- args := xClaimArgs(a)
- args = append(args, "justid")
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xClaimArgs(a *XClaimArgs) []interface{} {
- args := make([]interface{}, 0, 5+len(a.Messages))
- args = append(args,
- "xclaim",
- a.Stream,
- a.Group, a.Consumer,
- int64(a.MinIdle/time.Millisecond))
- for _, id := range a.Messages {
- args = append(args, id)
- }
- return args
-}
-
-// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default).
-// example:
-// XTRIM key MAXLEN/MINID threshold LIMIT limit.
-// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
-// The redis-server version is lower than 6.2, please set limit to 0.
-func (c cmdable) xTrim(
- ctx context.Context, key, strategy string,
- approx bool, threshold interface{}, limit int64,
-) *IntCmd {
- args := make([]interface{}, 0, 7)
- args = append(args, "xtrim", key, strategy)
- if approx {
- args = append(args, "~")
- }
- args = append(args, threshold)
- if limit > 0 {
- args = append(args, "limit", limit)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Deprecated: use XTrimMaxLen, remove in v9.
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// Deprecated: use XTrimMaxLenApprox, remove in v9.
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
-}
-
-// XTrimMaxLen No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MAXLEN maxLen
-func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
-func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
-}
-
-// XTrimMinID No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MINID minID
-func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
- return c.xTrim(ctx, key, "minid", false, minID, 0)
-}
-
-// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MINID ~ minID LIMIT limit
-func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "minid", true, minID, limit)
-}
-
-func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
- cmd := NewXInfoConsumersCmd(ctx, key, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
- cmd := NewXInfoGroupsCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
- cmd := NewXInfoStreamCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// XInfoStreamFull XINFO STREAM FULL [COUNT count]
-// redis-server >= 6.0.
-func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
- args := make([]interface{}, 0, 6)
- args = append(args, "xinfo", "stream", key, "full")
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewXInfoStreamFullCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Z represents sorted set member.
-type Z struct {
- Score float64
- Member interface{}
-}
-
-// ZWithKey represents sorted set member including the name of the key where it was popped.
-type ZWithKey struct {
- Z
- Key string
-}
-
-// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
-type ZStore struct {
- Keys []string
- Weights []float64
- // Can be SUM, MIN or MAX.
- Aggregate string
-}
-
-func (z ZStore) len() (n int) {
- n = len(z.Keys)
- if len(z.Weights) > 0 {
- n += 1 + len(z.Weights)
- }
- if z.Aggregate != "" {
- n += 2
- }
- return n
-}
-
-func (z ZStore) appendArgs(args []interface{}) []interface{} {
- for _, key := range z.Keys {
- args = append(args, key)
- }
- if len(z.Weights) > 0 {
- args = append(args, "weights")
- for _, weights := range z.Weights {
- args = append(args, weights)
- }
- }
- if z.Aggregate != "" {
- args = append(args, "aggregate", z.Aggregate)
- }
- return args
-}
-
-// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
-func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmax"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
-func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmin"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
-type ZAddArgs struct {
- NX bool
- XX bool
- LT bool
- GT bool
- Ch bool
- Members []Z
-}
-
-func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
- a := make([]interface{}, 0, 6+2*len(args.Members))
- a = append(a, "zadd", key)
-
- // The GT, LT and NX options are mutually exclusive.
- if args.NX {
- a = append(a, "nx")
- } else {
- if args.XX {
- a = append(a, "xx")
- }
- if args.GT {
- a = append(a, "gt")
- } else if args.LT {
- a = append(a, "lt")
- }
- }
- if args.Ch {
- a = append(a, "ch")
- }
- if incr {
- a = append(a, "incr")
- }
- for _, m := range args.Members {
- a = append(a, m.Score)
- a = append(a, m.Member)
- }
- return a
-}
-
-func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
- cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// TODO: Compatible with v8 api, will be removed in v9.
-func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
- args.Members = make([]Z, len(members))
- for i, m := range members {
- args.Members[i] = *m
- }
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAdd Redis `ZADD key score member [score member ...]` command.
-func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{}, members...)
-}
-
-// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
-func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- }, members...)
-}
-
-// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
-func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- }, members...)
-}
-
-// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- Ch: true,
- }, members...)
-}
-
-// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// NX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- Ch: true,
- }, members...)
-}
-
-// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
-// Deprecated: Use
-// client.ZAddArgs(ctx, ZAddArgs{
-// XX: true,
-// Ch: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- Ch: true,
- }, members...)
-}
-
-// ZIncr Redis `ZADD key INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- Members: []Z{*member},
- })
-}
-
-// ZIncrNX Redis `ZADD key NX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// NX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- NX: true,
- Members: []Z{*member},
- })
-}
-
-// ZIncrXX Redis `ZADD key XX INCR score member` command.
-// Deprecated: Use
-// client.ZAddArgsIncr(ctx, ZAddArgs{
-// XX: true,
-// Members: []Z,
-// })
-// remove in v9.
-func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- XX: true,
- Members: []Z{*member},
- })
-}
-
-func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinterstore", destination, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "zmscore"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewFloatSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmax",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmin",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRangeArgs is all the options of the ZRange command.
-// In version> 6.2.0, you can replace the(cmd):
-// ZREVRANGE,
-// ZRANGEBYSCORE,
-// ZREVRANGEBYSCORE,
-// ZRANGEBYLEX,
-// ZREVRANGEBYLEX.
-// Please pay attention to your redis-server version.
-//
-// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
-type ZRangeArgs struct {
- Key string
-
- // When the ByScore option is provided, the open interval(exclusive) can be set.
- // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
- // It is similar to the deprecated(6.2.0+) ZRangeByScore command.
- // For example:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "(3",
- // Stop: 8,
- // ByScore: true,
- // }
- // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
- //
- // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command.
- // You can set the <Start> and <Stop> options as follows:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "[abc",
- // Stop: "(def",
- // ByLex: true,
- // }
- // cmd: "ZRange example-key [abc (def ByLex"
- //
- // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
- // You can read the documentation for more information: https://redis.io/commands/zrange
- Start interface{}
- Stop interface{}
-
- // The ByScore and ByLex options are mutually exclusive.
- ByScore bool
- ByLex bool
-
- Rev bool
-
- // limit offset count.
- Offset int64
- Count int64
-}
-
-func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
- // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
- if z.Rev && (z.ByScore || z.ByLex) {
- args = append(args, z.Key, z.Stop, z.Start)
- } else {
- args = append(args, z.Key, z.Start, z.Stop)
- }
-
- if z.ByScore {
- args = append(args, "byscore")
- } else if z.ByLex {
- args = append(args, "bylex")
- }
- if z.Rev {
- args = append(args, "rev")
- }
- if z.Offset != 0 || z.Count != 0 {
- args = append(args, "limit", z.Offset, z.Count)
- }
- return args
-}
-
-func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- return c.ZRangeArgs(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-type ZRangeBy struct {
- Min, Max string
- Offset, Count int64
-}
-
-func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Min, opt.Max}
- if withScores {
- args = append(args, "withscores")
- }
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
-}
-
-func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
-}
-
-func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrangestore", dst)
- args = z.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "zrem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "zremrangebyrank",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Max, opt.Min}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
-}
-
-func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
-}
-
-func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrevrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zscore", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunionstore", dest, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRandMember redis-server version >= 6.2.0.
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "zrandmember", key, count)
- if withScores {
- args = append(args, "withscores")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiff redis-server version >= 6.2.0.
-func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffWithScores redis-server version >= 6.2.0.
-func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
- args[len(keys)+2] = "withscores"
-
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffStore redis-server version >=6.2.0.
-func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 0, 3+len(keys))
- args = append(args, "zdiffstore", destination, len(keys))
- for _, key := range keys {
- args = append(args, key)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(els))
- args[0] = "pfadd"
- args[1] = key
- args = appendArgs(args, els)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "pfcount"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "pfmerge"
- args[1] = dest
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgrewriteaof")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientKillByFilter is new style syntax, while the ClientKill is old
-//
-// CLIENT KILL <option> [value] ... <option> [value]
-func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "client"
- args[1] = "kill"
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientList(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "list")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientID(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "id")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "config", "get", parameter)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "resetstat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "rewrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DBSize(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "dbsize")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
- args := []interface{}{"info"}
- if len(section) > 0 {
- args = append(args, section[0])
- }
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LastSave(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "lastsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Save(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "save")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
- var args []interface{}
- if modifier == "" {
- args = []interface{}{"shutdown"}
- } else {
- args = []interface{}{"shutdown", modifier}
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- if err := cmd.Err(); err != nil {
- if err == io.EOF {
- // Server quit as expected.
- cmd.err = nil
- }
- } else {
- // Server did not quit. String reply contains the reason.
- cmd.err = errors.New(cmd.val)
- cmd.val = ""
- }
- return cmd
-}
-
-func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "")
-}
-
-func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "save")
-}
-
-func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "nosave")
-}
-
-func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "slaveof", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
- cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Sync(_ context.Context) {
- panic("not implemented")
-}
-
-func (c cmdable) Time(ctx context.Context) *TimeCmd {
- cmd := NewTimeCmd(ctx, "time")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "debug", "object", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readonly")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readwrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
- args := []interface{}{"memory", "usage", key}
- if len(samples) > 0 {
- if len(samples) != 1 {
- panic("MemoryUsage expects single sample count")
- }
- args = append(args, "SAMPLES", samples[0])
- }
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "eval"
- cmdArgs[1] = script
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "evalsha"
- cmdArgs[1] = sha1
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "kill")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
- cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Publish posts the message to the channel.
-func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "publish", channel, message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
- args := []interface{}{"pubsub", "channels"}
- if pattern != "*" {
- args = append(args, pattern)
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
- args := make([]interface{}, 2+len(channels))
- args[0] = "pubsub"
- args[1] = "numsub"
- for i, channel := range channels {
- args[2+i] = channel
- }
- cmd := NewStringIntMapCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "pubsub", "numpat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
- cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "nodes")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "info")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "delslots"
- for i, slot := range slots {
- args[2+i] = slot
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterDelSlots(ctx, slots...)
-}
-
-func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "failover")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "addslots"
- for i, num := range slots {
- args[2+i] = num
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterAddSlots(ctx, slots...)
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
- args := make([]interface{}, 2+3*len(geoLocation))
- args[0] = "geoadd"
- args[1] = key
- for i, eachLoc := range geoLocation {
- args[2+3*i] = eachLoc.Longitude
- args[2+3*i+1] = eachLoc.Latitude
- args[2+3*i+2] = eachLoc.Name
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadius is a read-only GEORADIUS_RO command.
-func (c cmdable) GeoRadius(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusStore is a writing GEORADIUS command.
-func (c cmdable) GeoRadiusStore(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadius", key, longitude, latitude)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
-func (c cmdable) GeoRadiusByMember(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
-func (c cmdable) GeoRadiusByMemberStore(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadiusbymember", key, member)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
- args := make([]interface{}, 0, 13)
- args = append(args, "geosearch", key)
- args = geoSearchArgs(q, args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchLocation(
- ctx context.Context, key string, q *GeoSearchLocationQuery,
-) *GeoSearchLocationCmd {
- args := make([]interface{}, 0, 16)
- args = append(args, "geosearch", key)
- args = geoSearchLocationArgs(q, args)
- cmd := NewGeoSearchLocationCmd(ctx, q, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
- args := make([]interface{}, 0, 15)
- args = append(args, "geosearchstore", store, key)
- args = geoSearchArgs(&q.GeoSearchQuery, args)
- if q.StoreDist {
- args = append(args, "storedist")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoDist(
- ctx context.Context, key string, member1, member2, unit string,
-) *FloatCmd {
- if unit == "" {
- unit = "km"
- }
- cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geohash"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geopos"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewGeoPosCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod
deleted file mode 100644
index d2610c2..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.mod
+++ /dev/null
@@ -1,20 +0,0 @@
-module github.com/go-redis/redis/v8
-
-go 1.17
-
-require (
- github.com/cespare/xxhash/v2 v2.1.2
- github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
- github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.18.1
-)
-
-require (
- github.com/fsnotify/fsnotify v1.4.9 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
- golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
- golang.org/x/text v0.3.6 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
-)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum
deleted file mode 100644
index e88f31a..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/go.sum
+++ /dev/null
@@ -1,108 +0,0 @@
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
-github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
-github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go
deleted file mode 100644
index 2365dbc..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/main_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package pool_test
-
-import (
- "context"
- "net"
- "sync"
- "testing"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func TestGinkgoSuite(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "pool")
-}
-
-func perform(n int, cbs ...func(int)) {
- var wg sync.WaitGroup
- for _, cb := range cbs {
- for i := 0; i < n; i++ {
- wg.Add(1)
- go func(cb func(int), i int) {
- defer GinkgoRecover()
- defer wg.Done()
-
- cb(i)
- }(cb, i)
- }
- }
- wg.Wait()
-}
-
-func dummyDialer(context.Context) (net.Conn, error) {
- return &net.TCPConn{}, nil
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go
deleted file mode 100644
index 0e6ca77..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package proto
-
-import (
- "bufio"
- "fmt"
- "io"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// redis resp protocol data type.
-const (
- ErrorReply = '-'
- StatusReply = '+'
- IntReply = ':'
- StringReply = '$'
- ArrayReply = '*'
-)
-
-//------------------------------------------------------------------------------
-
-const Nil = RedisError("redis: nil") // nolint:errname
-
-type RedisError string
-
-func (e RedisError) Error() string { return string(e) }
-
-func (RedisError) RedisError() {}
-
-//------------------------------------------------------------------------------
-
-type MultiBulkParse func(*Reader, int64) (interface{}, error)
-
-type Reader struct {
- rd *bufio.Reader
- _buf []byte
-}
-
-func NewReader(rd io.Reader) *Reader {
- return &Reader{
- rd: bufio.NewReader(rd),
- _buf: make([]byte, 64),
- }
-}
-
-func (r *Reader) Buffered() int {
- return r.rd.Buffered()
-}
-
-func (r *Reader) Peek(n int) ([]byte, error) {
- return r.rd.Peek(n)
-}
-
-func (r *Reader) Reset(rd io.Reader) {
- r.rd.Reset(rd)
-}
-
-func (r *Reader) ReadLine() ([]byte, error) {
- line, err := r.readLine()
- if err != nil {
- return nil, err
- }
- if isNilReply(line) {
- return nil, Nil
- }
- return line, nil
-}
-
-// readLine that returns an error if:
-// - there is a pending read error;
-// - or line does not end with \r\n.
-func (r *Reader) readLine() ([]byte, error) {
- b, err := r.rd.ReadSlice('\n')
- if err != nil {
- if err != bufio.ErrBufferFull {
- return nil, err
- }
-
- full := make([]byte, len(b))
- copy(full, b)
-
- b, err = r.rd.ReadBytes('\n')
- if err != nil {
- return nil, err
- }
-
- full = append(full, b...) //nolint:makezero
- b = full
- }
- if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
- return nil, fmt.Errorf("redis: invalid reply: %q", b)
- }
- return b[:len(b)-2], nil
-}
-
-func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
-
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- case StringReply:
- return r.readStringReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- if m == nil {
- err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
- return nil, err
- }
- return m(r, n)
- }
- return nil, fmt.Errorf("redis: can't parse %.100q", line)
-}
-
-func (r *Reader) ReadIntReply() (int64, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- default:
- return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadString() (string, error) {
- line, err := r.ReadLine()
- if err != nil {
- return "", err
- }
- switch line[0] {
- case ErrorReply:
- return "", ParseErrorReply(line)
- case StringReply:
- return r.readStringReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return string(line[1:]), nil
- default:
- return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
- }
-}
-
-func (r *Reader) readStringReply(line []byte) (string, error) {
- if isNilReply(line) {
- return "", Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return "", err
- }
-
- b := make([]byte, replyLen+2)
- _, err = io.ReadFull(r.rd, b)
- if err != nil {
- return "", err
- }
-
- return util.BytesToString(b[:replyLen]), nil
-}
-
-func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- return m(r, n)
- default:
- return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadArrayLen() (int, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return 0, err
- }
- return int(n), nil
- default:
- return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadScanReply() ([]string, uint64, error) {
- n, err := r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
- if n != 2 {
- return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
- }
-
- cursor, err := r.ReadUint()
- if err != nil {
- return nil, 0, err
- }
-
- n, err = r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
-
- keys := make([]string, n)
-
- for i := 0; i < n; i++ {
- key, err := r.ReadString()
- if err != nil {
- return nil, 0, err
- }
- keys[i] = key
- }
-
- return keys, cursor, err
-}
-
-func (r *Reader) ReadInt() (int64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseInt(b, 10, 64)
-}
-
-func (r *Reader) ReadUint() (uint64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseUint(b, 10, 64)
-}
-
-func (r *Reader) ReadFloatReply() (float64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseFloat(b, 64)
-}
-
-func (r *Reader) readTmpBytesReply() ([]byte, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StringReply:
- return r._readTmpBytesReply(line)
- case StatusReply:
- return line[1:], nil
- default:
- return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
- }
-}
-
-func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
- if isNilReply(line) {
- return nil, Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return nil, err
- }
-
- buf := r.buf(replyLen + 2)
- _, err = io.ReadFull(r.rd, buf)
- if err != nil {
- return nil, err
- }
-
- return buf[:replyLen], nil
-}
-
-func (r *Reader) buf(n int) []byte {
- if n <= cap(r._buf) {
- return r._buf[:n]
- }
- d := n - cap(r._buf)
- r._buf = append(r._buf, make([]byte, d)...)
- return r._buf
-}
-
-func isNilReply(b []byte) bool {
- return len(b) == 3 &&
- (b[0] == StringReply || b[0] == ArrayReply) &&
- b[1] == '-' && b[2] == '1'
-}
-
-func ParseErrorReply(line []byte) error {
- return RedisError(string(line[1:]))
-}
-
-func parseArrayLen(line []byte) (int64, error) {
- if isNilReply(line) {
- return 0, Nil
- }
- return util.ParseInt(line[1:], 10, 64)
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go
deleted file mode 100644
index b8c99dd..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/reader_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package proto_test
-
-import (
- "bytes"
- "io"
- "testing"
-
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-func BenchmarkReader_ParseReply_Status(b *testing.B) {
- benchmarkParseReply(b, "+OK\r\n", nil, false)
-}
-
-func BenchmarkReader_ParseReply_Int(b *testing.B) {
- benchmarkParseReply(b, ":1\r\n", nil, false)
-}
-
-func BenchmarkReader_ParseReply_Error(b *testing.B) {
- benchmarkParseReply(b, "-Error message\r\n", nil, true)
-}
-
-func BenchmarkReader_ParseReply_String(b *testing.B) {
- benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
-}
-
-func BenchmarkReader_ParseReply_Slice(b *testing.B) {
- benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", multiBulkParse, false)
-}
-
-func TestReader_ReadLine(t *testing.T) {
- original := bytes.Repeat([]byte("a"), 8192)
- original[len(original)-2] = '\r'
- original[len(original)-1] = '\n'
- r := proto.NewReader(bytes.NewReader(original))
- read, err := r.ReadLine()
- if err != nil && err != io.EOF {
- t.Errorf("Should be able to read the full buffer: %v", err)
- }
-
- if bytes.Compare(read, original[:len(original)-2]) != 0 {
- t.Errorf("Values must be equal: %d expected %d", len(read), len(original[:len(original)-2]))
- }
-}
-
-func benchmarkParseReply(b *testing.B, reply string, m proto.MultiBulkParse, wanterr bool) {
- buf := new(bytes.Buffer)
- for i := 0; i < b.N; i++ {
- buf.WriteString(reply)
- }
- p := proto.NewReader(buf)
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- _, err := p.ReadReply(m)
- if !wanterr && err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func multiBulkParse(p *proto.Reader, n int64) (interface{}, error) {
- vv := make([]interface{}, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := p.ReadReply(multiBulkParse)
- if err != nil {
- return nil, err
- }
- vv = append(vv, v)
- }
- return vv, nil
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go
deleted file mode 100644
index ebae569..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package proto_test
-
-import (
- "bytes"
- "encoding"
- "testing"
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-type MyType struct{}
-
-var _ encoding.BinaryMarshaler = (*MyType)(nil)
-
-func (t *MyType) MarshalBinary() ([]byte, error) {
- return []byte("hello"), nil
-}
-
-var _ = Describe("WriteBuffer", func() {
- var buf *bytes.Buffer
- var wr *proto.Writer
-
- BeforeEach(func() {
- buf = new(bytes.Buffer)
- wr = proto.NewWriter(buf)
- })
-
- It("should write args", func() {
- err := wr.WriteArgs([]interface{}{
- "string",
- 12,
- 34.56,
- []byte{'b', 'y', 't', 'e', 's'},
- true,
- nil,
- })
- Expect(err).NotTo(HaveOccurred())
-
- Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
- "$6\r\nstring\r\n" +
- "$2\r\n12\r\n" +
- "$5\r\n34.56\r\n" +
- "$5\r\nbytes\r\n" +
- "$1\r\n1\r\n" +
- "$0\r\n" +
- "\r\n")))
- })
-
- It("should append time", func() {
- tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
- err := wr.WriteArgs([]interface{}{tm})
- Expect(err).NotTo(HaveOccurred())
-
- Expect(buf.Len()).To(Equal(41))
- })
-
- It("should append marshalable args", func() {
- err := wr.WriteArgs([]interface{}{&MyType{}})
- Expect(err).NotTo(HaveOccurred())
-
- Expect(buf.Len()).To(Equal(15))
- })
-})
-
-type discard struct{}
-
-func (discard) Write(b []byte) (int, error) {
- return len(b), nil
-}
-
-func (discard) WriteString(s string) (int, error) {
- return len(s), nil
-}
-
-func (discard) WriteByte(c byte) error {
- return nil
-}
-
-func BenchmarkWriteBuffer_Append(b *testing.B) {
- buf := proto.NewWriter(discard{})
- args := []interface{}{"hello", "world", "foo", "bar"}
-
- for i := 0; i < b.N; i++ {
- err := buf.WriteArgs(args)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go
deleted file mode 100644
index fd2f434..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/safe.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package internal
-
-func String(b []byte) string {
- return string(b)
-}
-
-func Bytes(s string) []byte {
- return []byte(s)
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go
deleted file mode 100644
index 9f2e418..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/unsafe.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import "unsafe"
-
-// String converts byte slice to string.
-func String(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// Bytes converts string to byte slice.
-func Bytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go
deleted file mode 100644
index b1dd0bd..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package redis
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("newClusterState", func() {
- var state *clusterState
-
- createClusterState := func(slots []ClusterSlot) *clusterState {
- opt := &ClusterOptions{}
- opt.init()
- nodes := newClusterNodes(opt)
- state, err := newClusterState(nodes, slots, "10.10.10.10:1234")
- Expect(err).NotTo(HaveOccurred())
- return state
- }
-
- Describe("sorting", func() {
- BeforeEach(func() {
- state = createClusterState([]ClusterSlot{{
- Start: 1000,
- End: 1999,
- }, {
- Start: 0,
- End: 999,
- }, {
- Start: 2000,
- End: 2999,
- }})
- })
-
- It("sorts slots", func() {
- Expect(state.slots).To(Equal([]*clusterSlot{
- {start: 0, end: 999, nodes: nil},
- {start: 1000, end: 1999, nodes: nil},
- {start: 2000, end: 2999, nodes: nil},
- }))
- })
- })
-
- Describe("loopback", func() {
- BeforeEach(func() {
- state = createClusterState([]ClusterSlot{{
- Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}},
- }, {
- Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}},
- }, {
- Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}},
- }, {
- Nodes: []ClusterNode{{Addr: ":1234"}},
- }})
- })
-
- It("replaces loopback hosts in addresses", func() {
- slotAddr := func(slot *clusterSlot) string {
- return slot.nodes[0].Client.Options().Addr
- }
-
- Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001"))
- Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002"))
- Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234"))
- Expect(slotAddr(state.slots[3])).To(Equal(":1234"))
- })
- })
-})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json
deleted file mode 100644
index e4ea4bb..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/package.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "name": "redis",
- "version": "8.11.5",
- "main": "index.js",
- "repository": "git@github.com:go-redis/redis.git",
- "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
- "license": "BSD-2-clause"
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go
deleted file mode 100644
index bcf8a2a..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis.go
+++ /dev/null
@@ -1,773 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// Nil reply returned by Redis when key does not exist.
-const Nil = proto.Nil
-
-func SetLogger(logger internal.Logging) {
- internal.Logger = logger
-}
-
-//------------------------------------------------------------------------------
-
-type Hook interface {
- BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
- AfterProcess(ctx context.Context, cmd Cmder) error
-
- BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
- AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
-}
-
-type hooks struct {
- hooks []Hook
-}
-
-func (hs *hooks) lock() {
- hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
-}
-
-func (hs hooks) clone() hooks {
- clone := hs
- clone.lock()
- return clone
-}
-
-func (hs *hooks) AddHook(hook Hook) {
- hs.hooks = append(hs.hooks, hook)
-}
-
-func (hs hooks) process(
- ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmd)
- cmd.SetErr(err)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
- if retErr != nil {
- cmd.SetErr(retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmd)
- cmd.SetErr(retErr)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
- retErr = err
- cmd.SetErr(retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmds)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
- if retErr != nil {
- setCmdsErr(cmds, retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmds)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
- retErr = err
- setCmdsErr(cmds, retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processTxPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- cmds = wrapMultiExec(ctx, cmds)
- return hs.processPipeline(ctx, cmds, fn)
-}
-
-//------------------------------------------------------------------------------
-
-type baseClient struct {
- opt *Options
- connPool pool.Pooler
-
- onClose func() error // hook called when client is closed
-}
-
-func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
- return &baseClient{
- opt: opt,
- connPool: connPool,
- }
-}
-
-func (c *baseClient) clone() *baseClient {
- clone := *c
- return &clone
-}
-
-func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
- opt := c.opt.clone()
- opt.ReadTimeout = timeout
- opt.WriteTimeout = timeout
-
- clone := c.clone()
- clone.opt = opt
-
- return clone
-}
-
-func (c *baseClient) String() string {
- return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
-}
-
-func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.NewConn(ctx)
- if err != nil {
- return nil, err
- }
-
- err = c.initConn(ctx, cn)
- if err != nil {
- _ = c.connPool.CloseConn(cn)
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
- if c.opt.Limiter != nil {
- err := c.opt.Limiter.Allow()
- if err != nil {
- return nil, err
- }
- }
-
- cn, err := c._getConn(ctx)
- if err != nil {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if cn.Inited {
- return cn, nil
- }
-
- if err := c.initConn(ctx, cn); err != nil {
- c.connPool.Remove(ctx, cn, err)
- if err := errors.Unwrap(err); err != nil {
- return nil, err
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
- if cn.Inited {
- return nil
- }
- cn.Inited = true
-
- if c.opt.Password == "" &&
- c.opt.DB == 0 &&
- !c.opt.readOnly &&
- c.opt.OnConnect == nil {
- return nil
- }
-
- connPool := pool.NewSingleConnPool(c.connPool, cn)
- conn := newConn(ctx, c.opt, connPool)
-
- _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
- if c.opt.Password != "" {
- if c.opt.Username != "" {
- pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
- } else {
- pipe.Auth(ctx, c.opt.Password)
- }
- }
-
- if c.opt.DB > 0 {
- pipe.Select(ctx, c.opt.DB)
- }
-
- if c.opt.readOnly {
- pipe.ReadOnly(ctx)
- }
-
- return nil
- })
- if err != nil {
- return err
- }
-
- if c.opt.OnConnect != nil {
- return c.opt.OnConnect(ctx, conn)
- }
- return nil
-}
-
-func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
-
- if isBadConn(err, false, c.opt.Addr) {
- c.connPool.Remove(ctx, cn, err)
- } else {
- c.connPool.Put(ctx, cn)
- }
-}
-
-func (c *baseClient) withConn(
- ctx context.Context, fn func(context.Context, *pool.Conn) error,
-) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
-
- defer func() {
- c.releaseConn(ctx, cn, err)
- }()
-
- done := ctx.Done() //nolint:ifshort
-
- if done == nil {
- err = fn(ctx, cn)
- return err
- }
-
- errc := make(chan error, 1)
- go func() { errc <- fn(ctx, cn) }()
-
- select {
- case <-done:
- _ = cn.Close()
- // Wait for the goroutine to finish and send something.
- <-errc
-
- err = ctx.Err()
- return err
- case err = <-errc:
- return err
- }
-}
-
-func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- attempt := attempt
-
- retry, err := c._process(ctx, cmd, attempt)
- if err == nil || !retry {
- return err
- }
-
- lastErr = err
- }
- return lastErr
-}
-
-func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return false, err
- }
- }
-
- retryTimeout := uint32(1)
- err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
- if err != nil {
- return err
- }
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- if cmd.readTimeout() == nil {
- atomic.StoreUint32(&retryTimeout, 1)
- }
- return err
- }
-
- return nil
- })
- if err == nil {
- return false, nil
- }
-
- retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
- return retry, err
-}
-
-func (c *baseClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
- if timeout := cmd.readTimeout(); timeout != nil {
- t := *timeout
- if t == 0 {
- return 0
- }
- return t + 10*time.Second
- }
- return c.opt.ReadTimeout
-}
-
-// Close closes the client, releasing any open resources.
-//
-// It is rare to Close a Client, as the Client is meant to be
-// long-lived and shared between many goroutines.
-func (c *baseClient) Close() error {
- var firstErr error
- if c.onClose != nil {
- if err := c.onClose(); err != nil {
- firstErr = err
- }
- }
- if err := c.connPool.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- return firstErr
-}
-
-func (c *baseClient) getAddr() string {
- return c.opt.Addr
-}
-
-func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
-}
-
-func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
-}
-
-type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
-
-func (c *baseClient) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- err := c._generalProcessPipeline(ctx, cmds, p)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- return cmdsFirstErr(cmds)
-}
-
-func (c *baseClient) _generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- var canRetry bool
- lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- var err error
- canRetry, err = p(ctx, cn, cmds)
- return err
- })
- if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *baseClient) pipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return pipelineReadCmds(rd, cmds)
- })
- return true, err
-}
-
-func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
- if err != nil && !isRedisError(err) {
- return err
- }
- }
- return nil
-}
-
-func (c *baseClient) txPipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := txPipelineReadQueued(rd, statusCmd, cmds)
- if err != nil {
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- return false, err
-}
-
-func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
- if len(cmds) == 0 {
- panic("not reached")
- }
- cmdCopy := make([]Cmder, len(cmds)+2)
- cmdCopy[0] = NewStatusCmd(ctx, "multi")
- copy(cmdCopy[1:], cmds)
- cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
- return cmdCopy
-}
-
-func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for range cmds {
- if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
- return err
- }
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- err := fmt.Errorf("redis: expected '*', but got line %q", line)
- return err
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-// Client is a Redis client representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-type Client struct {
- *baseClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClient returns a client to the Redis Server specified by Options.
-func NewClient(opt *Options) *Client {
- opt.init()
-
- c := Client{
- baseClient: newBaseClient(opt, newConnPool(opt)),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
-
- return &c
-}
-
-func (c *Client) clone() *Client {
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- return &clone
-}
-
-func (c *Client) WithTimeout(timeout time.Duration) *Client {
- clone := c.clone()
- clone.baseClient = c.baseClient.withTimeout(timeout)
- return clone
-}
-
-func (c *Client) Context() context.Context {
- return c.ctx
-}
-
-func (c *Client) WithContext(ctx context.Context) *Client {
- if ctx == nil {
- panic("nil context")
- }
- clone := c.clone()
- clone.ctx = ctx
- return clone
-}
-
-func (c *Client) Conn(ctx context.Context) *Conn {
- return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Client) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Client) Options() *Options {
- return c.opt
-}
-
-type PoolStats pool.Stats
-
-// PoolStats returns connection pool stats.
-func (c *Client) PoolStats() *PoolStats {
- stats := c.connPool.Stats()
- return (*PoolStats)(stats)
-}
-
-func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Client) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Client) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-// Note that this method does not wait on a response from Redis, so the
-// subscription may not be active immediately. To force the connection to wait,
-// you may call the Receive() method on the returned *PubSub like so:
-//
-// sub := client.Subscribe(queryResp)
-// iface, err := sub.Receive()
-// if err != nil {
-// // handle error
-// }
-//
-// // Should be *Subscription, but others are possible if other actions have been
-// // taken on sub since it was created.
-// switch iface.(type) {
-// case *Subscription:
-// // subscribe succeeded
-// case *Message:
-// // received first message
-// case *Pong:
-// // pong received
-// default:
-// // handle error
-// }
-//
-// ch := sub.Channel()
-func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-//------------------------------------------------------------------------------
-
-type conn struct {
- baseClient
- cmdable
- statefulCmdable
- hooks // TODO: inherit hooks
-}
-
-// Conn represents a single Redis connection rather than a pool of connections.
-// Prefer running commands from Client unless there is a specific need
-// for a continuous single Redis connection.
-type Conn struct {
- *conn
- ctx context.Context
-}
-
-func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
- c := Conn{
- conn: &conn{
- baseClient: baseClient{
- opt: opt,
- connPool: connPool,
- },
- },
- ctx: ctx,
- }
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
- return &c
-}
-
-func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Conn) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Conn) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf b/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf
deleted file mode 100644
index 235b295..0000000
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/testdata/redis.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Minimal redis.conf
-
-port 6379
-daemonize no
-dir .
-save ""
-appendonly yes
-cluster-config-file nodes.conf
-cluster-node-timeout 30000
-maxclients 1001 \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go
deleted file mode 100644
index 3a4ec25..0000000
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build gofuzz
-// +build gofuzz
-
-package mysql
-
-import (
- "database/sql"
-)
-
-func Fuzz(data []byte) int {
- db, err := sql.Open("mysql", string(data))
- if err != nil {
- return 0
- }
- db.Close()
- return 1
-}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod
deleted file mode 100644
index 2511104..0000000
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/go-sql-driver/mysql
-
-go 1.13
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go
deleted file mode 100644
index c6438d0..0000000
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/result.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-type mysqlResult struct {
- affectedRows int64
- insertId int64
-}
-
-func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertId, nil
-}
-
-func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows, nil
-}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/CONTRIBUTING.md
index 8fe16bc..8fe16bc 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/CONTRIBUTING.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/CONTRIBUTING.md
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/ISSUE_TEMPLATE.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/ISSUE_TEMPLATE.md
index d9771f1..d9771f1 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/ISSUE_TEMPLATE.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/ISSUE_TEMPLATE.md
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/PULL_REQUEST_TEMPLATE.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/PULL_REQUEST_TEMPLATE.md
index 6f5c7eb..6f5c7eb 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/PULL_REQUEST_TEMPLATE.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/PULL_REQUEST_TEMPLATE.md
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/codeql.yml b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/codeql.yml
index d9d29a8..83a3d6e 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/codeql.yml
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/codeql.yml
@@ -24,18 +24,18 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Initialize CodeQL
- uses: github/codeql-action/init@v2
+ uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
queries: +security-and-quality
- name: Autobuild
- uses: github/codeql-action/autobuild@v2
+ uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ uses: github/codeql-action/analyze@v3
with:
category: "/language:${{ matrix.language }}"
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/test.yml b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/test.yml
index d45ed0f..f5a1158 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.github/workflows/test.yml
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.github/workflows/test.yml
@@ -11,6 +11,14 @@ env:
MYSQL_TEST_CONCURRENT: 1
jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dominikh/staticcheck-action@v1.3.0
+ with:
+ version: "2023.1.6"
+
list:
runs-on: ubuntu-latest
outputs:
@@ -23,17 +31,14 @@ jobs:
import os
go = [
# Keep the most recent production release at the top
- '1.20',
+ '1.21',
# Older production releases
+ '1.20',
'1.19',
'1.18',
- '1.17',
- '1.16',
- '1.15',
- '1.14',
- '1.13',
]
mysql = [
+ '8.1',
'8.0',
'5.7',
'5.6',
@@ -47,7 +52,7 @@ jobs:
includes = []
# Go versions compatibility check
for v in go[1:]:
- includes.append({'os': 'ubuntu-latest', 'go': v, 'mysql': mysql[0]})
+ includes.append({'os': 'ubuntu-latest', 'go': v, 'mysql': mysql[0]})
matrix = {
# OS vs MySQL versions
@@ -68,11 +73,11 @@ jobs:
fail-fast: false
matrix: ${{ fromJSON(needs.list.outputs.matrix) }}
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-go@v3
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- - uses: shogo82148/actions-setup-mysql@v1.15.0
+ - uses: shogo82148/actions-setup-mysql@v1
with:
mysql-version: ${{ matrix.mysql }}
user: ${{ env.MYSQL_TEST_USER }}
@@ -84,13 +89,14 @@ jobs:
; TestConcurrent fails if max_connections is too large
max_connections=50
local_infile=1
+ performance_schema=on
- name: setup database
run: |
mysql --user 'root' --host '127.0.0.1' -e 'create database gotest;'
- name: test
run: |
- go test -v '-covermode=count' '-coverprofile=coverage.out'
+ go test -v '-race' '-covermode=atomic' '-coverprofile=coverage.out' -parallel 10
- name: Send coverage
uses: shogo82148/actions-goveralls@v1
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.gitignore b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.gitignore
index 2de28da..2de28da 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/.gitignore
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/.gitignore
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/AUTHORS b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/AUTHORS
index fb1478c..4021b96 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/AUTHORS
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/AUTHORS
@@ -13,6 +13,7 @@
Aaron Hopkins <go-sql-driver at die.net>
Achille Roussel <achille.roussel at gmail.com>
+Aidan <aidan.liu at pingcap.com>
Alex Snast <alexsn at fb.com>
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
Andrew Reid <andrew.reid at tixtrack.com>
@@ -20,12 +21,14 @@ Animesh Ray <mail.rayanimesh at gmail.com>
Arne Hormann <arnehormann at gmail.com>
Ariel Mashraki <ariel at mashraki.co.il>
Asta Xie <xiemengjun at gmail.com>
+Brian Hendriks <brian at dolthub.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Caine Jette <jette at alum.mit.edu>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Kirkland <chriskirkland at github.com>
Chris Moos <chris at tech9computers.com>
Craig Wilson <craiggwilson at gmail.com>
+Daemonxiao <735462752 at qq.com>
Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
@@ -33,9 +36,11 @@ Dave Protasowski <dprotaso at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
Erwan Martin <hello at erwan.io>
+Evan Elias <evan at skeema.net>
Evan Shaw <evan at vendhq.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
+Gusted <postmaster at gusted.xyz>
Hajime Nakagami <nakagami at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
@@ -47,8 +52,11 @@ INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
Janek Vedock <janekvedock at comcast.net>
+Jason Ng <oblitorum at gmail.com>
+Jean-Yves Pellé <jy at pelle.link>
Jeff Hodges <jeff at somethingsimilar.com>
Jeffrey Charles <jeffreycharles at gmail.com>
+Jennifer Purevsuren <jennifer at dolthub.com>
Jerome Meyer <jxmeyer at gmail.com>
Jiajia Zhong <zhong2plus at gmail.com>
Jian Zhen <zhenjl at gmail.com>
@@ -74,9 +82,11 @@ Maciej Zimnoch <maciej.zimnoch at codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nathanial Murphy <nathanial.murphy at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
+Oliver Bone <owbone at github.com>
Olivier Mengué <dolmen at cpan.org>
oscarzhao <oscarzhaosl at gmail.com>
Paul Bonser <misterpib at gmail.com>
+Paulius Lozys <pauliuslozys at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
Phil Porada <philporada at gmail.com>
Rebecca Chin <rchin at pivotal.io>
@@ -95,6 +105,7 @@ Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
Steven Hartland <steven.hartland at multiplay.co.uk>
Tan Jinhua <312841925 at qq.com>
+Tetsuro Aoki <t.aoki1130 at gmail.com>
Thomas Wodarek <wodarekwebpage at gmail.com>
Tim Ruffles <timruffles at gmail.com>
Tom Jenkinson <tom at tjenkinson.me>
@@ -104,6 +115,7 @@ Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Xuehong Chan <chanxuehong at gmail.com>
+Zhang Xiang <angwerzx at 126.com>
Zhenye Xie <xiezhenye at gmail.com>
Zhixin Wen <john.wenzhixin at gmail.com>
Ziheng Lyu <zihenglv at gmail.com>
@@ -113,14 +125,18 @@ Ziheng Lyu <zihenglv at gmail.com>
Barracuda Networks, Inc.
Counting Ltd.
DigitalOcean Inc.
+Dolthub Inc.
dyves labs AG
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
+Microsoft Corp.
Multiplay Ltd.
Percona LLC
+PingCAP Inc.
Pivotal Inc.
+Shattered Silicon Ltd.
Stripe Inc.
Zendesk Inc.
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/CHANGELOG.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/CHANGELOG.md
index 5166e4a..0c9bd9b 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/CHANGELOG.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/CHANGELOG.md
@@ -1,3 +1,45 @@
+## Version 1.8.1 (2024-03-26)
+
+Bugfixes:
+
+- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)
+
+## Version 1.8.0 (2024-03-09)
+
+Major Changes:
+
+- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
+ - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation.
+ - If you don't specify charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connection. This uses server's default collation for utf8mb4.
+ - If you specify charset, go-mysql-driver sends `SET NAMES <charset>`. This uses the server's default collation for `<charset>`.
+ - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
+- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
+ - This is backward incompatible in rare case. Check your DSN.
+- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
+ - Use Go 1.18+
+- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
+ - When text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql for avoid unnecessary allocation and conversion.
+ - If user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable.
+ - This confused users because most user doesn't know when text/binary protocol used.
+ - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and conversion cost is negatable.
+- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future version may introduce Functional Option for existing options, but not for now.
+ - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
+ - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)
+
+
+Other changes:
+
+- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
+- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
+- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
+- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
+- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
+- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
+- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
+- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
+- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
+- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518
+
## Version 1.7.1 (2023-04-25)
Changes:
@@ -162,7 +204,7 @@ New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
@@ -206,7 +248,7 @@ Changes:
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
New Features:
@@ -254,7 +296,7 @@ Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- - Splitted packets (more than 16MB) are now merged correctly
+ - Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/LICENSE b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/LICENSE
index 14e2f77..14e2f77 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/LICENSE
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/LICENSE
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/README.md b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/README.md
index 3b5d229..4968cb0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/README.md
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/README.md
@@ -40,15 +40,23 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Optional placeholder interpolation
## Requirements
- * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+* Go 1.19 or higher. We aim to support the 3 latest versions of Go.
+* MySQL (5.7+) and MariaDB (10.3+) are supported.
+* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
+ * Do not ask questions about TiDB in our issue tracker or forum.
+ * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
+ * [Forum](https://ask.pingcap.com/)
+* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
+ * Maintainers won't support them. Do not expect issues are investigated and resolved by maintainers.
+ * Investigate issues yourself and please send a pull request to fix it.
---------------------------------------
## Installation
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get -u github.com/go-sql-driver/mysql
+go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
@@ -114,6 +122,12 @@ This has the same effect as an empty DSN string:
```
+`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:
+
+```
+/dbname%2Fwithslash
+```
+
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
#### Password
@@ -121,7 +135,7 @@ Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use an Unix domain socket if available and TCP otherwise for best performance.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.
@@ -145,7 +159,7 @@ Default: false
```
`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)
##### `allowCleartextPasswords`
@@ -194,10 +208,9 @@ Valid Values: <name>
Default: none
```
-Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
+See also [Unicode Support](#unicode-support).
##### `checkConnLiveness`
@@ -226,6 +239,7 @@ The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You s
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+See also [Unicode Support](#unicode-support).
##### `clientFoundRows`
@@ -279,6 +293,15 @@ Note that this sets the location for time.Time values but does not change MySQL'
Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+##### `timeTruncate`
+
+```
+Type: duration
+Default: 0
+```
+
+[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `maxAllowedPacket`
```
Type: decimal number
@@ -295,9 +318,25 @@ Valid Values: true, false
Default: false
```
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+Allow multiple statements in one query. This can be used to bach multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get result of the second and subsequent queries.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless prepared statement is used explicitly.
+
+It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
+```go
+conn, _ := db.Conn(ctx)
+conn.Raw(func(conn any) error {
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ UPDATE point SET x = 1 WHERE y = 2;
+ UPDATE point SET x = 2 WHERE y = 3;
+ `, nil)
+ // Both slices have 2 elements.
+ log.Print(res.(mysql.Result).AllRowsAffected())
+ log.Print(res.(mysql.Result).AllLastInsertIds())
+})
+```
##### `parseTime`
@@ -393,6 +432,15 @@ Default: 0
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+##### `connectionAttributes`
+
+```
+Type: comma-delimited string of user-defined "key:value" pairs
+Valid Values: (<name1>:<value1>,<name2>:<value2>,...)
+Default: none
+```
+
+[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.
##### System Variables
@@ -465,7 +513,7 @@ user:password@/
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`.
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
@@ -478,7 +526,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
@@ -496,9 +544,11 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+- When only the `charset` is specified, the `SET NAMES <charset>` query is sent and the server's default collation is used.
+- When both the `charset` and `collation` are specified, the `SET NAMES <charset> COLLATE <collation>` query is sent.
+- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool.go
index 1b7e19f..1b7e19f 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_go118.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_go118.go
index 2e9a7f0..2e9a7f0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_go118.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_go118.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_test.go
index a3b4ea0..a3b4ea0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/atomic_bool_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/atomic_bool_test.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth.go
index 1ff203e..74e1bd0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth.go
@@ -13,10 +13,13 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"encoding/pem"
"fmt"
"sync"
+
+ "filippo.io/edwards25519"
)
// server pub keys registry
@@ -33,7 +36,7 @@ var (
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
-// data, err := ioutil.ReadFile("mykey.pem")
+// data, err := os.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -225,6 +228,44 @@ func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte,
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
+// authEd25519 does ed25519 authentication used by MariaDB.
+func authEd25519(scramble []byte, password string) ([]byte, error) {
+ // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c
+ // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207
+ h := sha512.Sum512([]byte(password))
+
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ return nil, err
+ }
+ A := (&edwards25519.Point{}).ScalarBaseMult(s)
+
+ mh := sha512.New()
+ mh.Write(h[32:])
+ mh.Write(scramble)
+ messageDigest := mh.Sum(nil)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ R := (&edwards25519.Point{}).ScalarBaseMult(r)
+
+ kh := sha512.New()
+ kh.Write(R.Bytes())
+ kh.Write(A.Bytes())
+ kh.Write(scramble)
+ hramDigest := kh.Sum(nil)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ S := k.MultiplyAdd(k, s, r)
+
+ return append(R.Bytes(), S.Bytes()...), nil
+}
+
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
@@ -290,8 +331,14 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
+ case "client_ed25519":
+ if len(authData) != 32 {
+ return nil, ErrMalformPkt
+ }
+ return authEd25519(authData, mc.cfg.Passwd)
+
default:
- errLog.Print("unknown auth plugin:", plugin)
+ mc.log("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
@@ -338,7 +385,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
switch plugin {
- // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
@@ -346,7 +393,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
- if err = mc.readResultOK(); err == nil {
+ if err = mc.resultUnchanged().readResultOK(); err == nil {
return nil // auth successful
}
@@ -376,13 +423,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
}
if data[0] != iAuthMoreData {
- return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ return fmt.Errorf("unexpected resp from server for caching_sha2_password, perform full authentication")
}
// parse public key
block, rest := pem.Decode(data[1:])
if block == nil {
- return fmt.Errorf("No Pem data found, data: %s", rest)
+ return fmt.Errorf("no pem data found, data: %s", rest)
}
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
@@ -397,7 +444,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
return err
}
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
default:
return ErrMalformPkt
@@ -426,7 +473,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
if err != nil {
return err
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
}
default:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth_test.go
index 3ce0ea6..8caed1f 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/auth_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/auth_test.go
@@ -1328,3 +1328,54 @@ func TestAuthSwitchSHA256PasswordSecure(t *testing.T) {
t.Errorf("got unexpected data: %v", conn.written)
}
}
+
+// Derived from https://github.com/MariaDB/server/blob/6b2287fff23fbdc362499501c562f01d0d2db52e/plugin/auth_ed25519/ed25519-t.c
+func TestEd25519Auth(t *testing.T) {
+ conn, mc := newRWMockConn(1)
+ mc.cfg.User = "root"
+ mc.cfg.Passwd = "foobar"
+
+ authData := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+ plugin := "client_ed25519"
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = mc.writeHandshakeResponsePacket(authResp, plugin)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check written auth response
+ authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
+ authRespEnd := authRespStart + 1 + len(authResp)
+ writtenAuthRespLen := conn.written[authRespStart]
+ writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
+ expectedAuthResp := []byte{
+ 232, 61, 201, 63, 67, 63, 51, 53, 86, 73, 238, 35, 170, 117, 146,
+ 214, 26, 17, 35, 9, 8, 132, 245, 141, 48, 99, 66, 58, 36, 228, 48,
+ 84, 115, 254, 187, 168, 88, 162, 249, 57, 35, 85, 79, 238, 167, 106,
+ 68, 117, 56, 135, 171, 47, 20, 14, 133, 79, 15, 229, 124, 160, 176,
+ 100, 138, 14,
+ }
+ if writtenAuthRespLen != 64 {
+ t.Fatalf("expected 64 bytes from client, got %d", writtenAuthRespLen)
+ }
+ if !bytes.Equal(writtenAuthResp, expectedAuthResp) {
+ t.Fatalf("auth response did not match expected value:\n%v\n%v", writtenAuthResp, expectedAuthResp)
+ }
+ conn.written = nil
+
+ // auth response
+ conn.data = []byte{
+ 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
+ }
+ conn.maxReads = 1
+
+ // Handle response to auth packet
+ if err := mc.handleAuthResult(authData, plugin); err != nil {
+ t.Errorf("got error: %v", err)
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/benchmark_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/benchmark_test.go
index 97ed781..a4ecc0a 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/benchmark_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/benchmark_test.go
@@ -48,7 +48,7 @@ func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
func initDB(b *testing.B, queries ...string) *sql.DB {
tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
for _, query := range queries {
if _, err := db.Exec(query); err != nil {
b.Fatalf("error on %q: %v", query, err)
@@ -105,7 +105,7 @@ func BenchmarkExec(b *testing.B) {
tb := (*TB)(b)
b.StopTimer()
b.ReportAllocs()
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
db.SetMaxIdleConns(concurrencyLevel)
defer db.Close()
@@ -151,7 +151,7 @@ func BenchmarkRoundtripTxt(b *testing.B) {
sampleString := string(sample)
b.ReportAllocs()
tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
defer db.Close()
b.StartTimer()
var result string
@@ -184,7 +184,7 @@ func BenchmarkRoundtripBin(b *testing.B) {
sample, min, max := initRoundtripBenchmarks()
b.ReportAllocs()
tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
+ db := tb.checkDB(sql.Open(driverNameTest, dsn))
defer db.Close()
stmt := tb.checkStmt(db.Prepare("SELECT ?"))
defer stmt.Close()
@@ -372,3 +372,59 @@ func BenchmarkQueryRawBytes(b *testing.B) {
})
}
}
+
+// BenchmarkReceiveMassiveRows measures performance of receiving large number of rows.
+func BenchmarkReceiveMassiveRows(b *testing.B) {
+ // Setup -- prepare 10000 rows.
+ db := initDB(b,
+ "DROP TABLE IF EXISTS foo",
+ "CREATE TABLE foo (id INT PRIMARY KEY, val TEXT)")
+ defer db.Close()
+
+ sval := strings.Repeat("x", 50)
+ stmt, err := db.Prepare(`INSERT INTO foo (id, val) VALUES (?, ?)` + strings.Repeat(",(?,?)", 99))
+ if err != nil {
+ b.Errorf("failed to prepare query: %v", err)
+ return
+ }
+ for i := 0; i < 10000; i += 100 {
+ args := make([]any, 200)
+ for j := 0; j < 100; j++ {
+ args[j*2] = i + j
+ args[j*2+1] = sval
+ }
+ _, err := stmt.Exec(args...)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ }
+ stmt.Close()
+
+ // Use b.Run() to skip expensive setup.
+ b.Run("query", func(b *testing.B) {
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ rows, err := db.Query(`SELECT id, val FROM foo`)
+ if err != nil {
+ b.Errorf("failed to select: %v", err)
+ return
+ }
+ for rows.Next() {
+ var i int
+ var s sql.RawBytes
+ err = rows.Scan(&i, &s)
+ if err != nil {
+ b.Errorf("failed to scan: %v", err)
+ _ = rows.Close()
+ return
+ }
+ }
+ if err = rows.Err(); err != nil {
+ b.Errorf("failed to read rows: %v", err)
+ }
+ _ = rows.Close()
+ }
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/buffer.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/buffer.go
index 0774c5c..0774c5c 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/buffer.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/buffer.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/collations.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/collations.go
index 295bfbe..1cdf97b 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/collations.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/collations.go
@@ -9,7 +9,7 @@
package mysql
const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
+const binaryCollationID = 63
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck.go
index 0ea7217..0ea7217 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_dummy.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_dummy.go
index a56c138..a56c138 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_dummy.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_dummy.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_test.go
index f7e0256..6b60cb7 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/conncheck_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/conncheck_test.go
@@ -17,7 +17,7 @@ import (
)
func TestStaleConnectionChecks(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
dbt.mustExec("SET @@SESSION.wait_timeout = 2")
if err := dbt.db.Ping(); err != nil {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection.go
index 947a883..eff978d 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection.go
@@ -23,10 +23,10 @@ import (
type mysqlConn struct {
buf buffer
netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- affectedRows uint64
- insertId uint64
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ result mysqlResult // managed by clearResult() and handleOkPacket().
cfg *Config
+ connector *connector
maxAllowedPacket int
maxWriteSize int
writeTimeout time.Duration
@@ -34,7 +34,6 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- reset bool // set when the Go SQL package calls ResetSession
// for context support (Go 1.8+)
watching bool
@@ -45,17 +44,27 @@ type mysqlConn struct {
closed atomicBool // set when conn is closed, before closech is closed
}
+// Helper function to call per-connection logger.
+func (mc *mysqlConn) log(v ...any) {
+ mc.cfg.Logger.Print(v...)
+}
+
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
var cmdSet strings.Builder
+
for param, val := range mc.cfg.Params {
switch param {
// Charset: character_set_connection, character_set_client, character_set_results
case "charset":
charsets := strings.Split(val, ",")
- for i := range charsets {
+ for _, cs := range charsets {
// ignore errors here - a charset may not exist
- err = mc.exec("SET NAMES " + charsets[i])
+ if mc.cfg.Collation != "" {
+ err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
+ } else {
+ err = mc.exec("SET NAMES " + cs)
+ }
if err == nil {
break
}
@@ -68,7 +77,7 @@ func (mc *mysqlConn) handleParams() (err error) {
default:
if cmdSet.Len() == 0 {
// Heuristic: 29 chars for each other key=value to reduce reallocations
- cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
cmdSet.WriteString("SET ")
} else {
cmdSet.WriteString(", ")
@@ -105,7 +114,7 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
var q string
@@ -128,7 +137,7 @@ func (mc *mysqlConn) Close() (err error) {
}
mc.cleanup()
-
+ mc.clearResult()
return
}
@@ -143,12 +152,16 @@ func (mc *mysqlConn) cleanup() {
// Makes cleanup idempotent
close(mc.closech)
- if mc.netConn == nil {
+ conn := mc.rawConn
+ if conn == nil {
return
}
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ if err := conn.Close(); err != nil {
+ mc.log(err)
}
+ // This function can be called from multiple goroutines.
+ // So we can not mc.clearResult() here.
+ // Caller should do it if they are in safe goroutine.
}
func (mc *mysqlConn) error() error {
@@ -163,14 +176,14 @@ func (mc *mysqlConn) error() error {
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
// STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
- errLog.Print(err)
+ mc.log(err)
return nil, driver.ErrBadConn
}
@@ -204,7 +217,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return "", ErrInvalidConn
}
buf = buf[:0]
@@ -246,7 +259,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf = append(buf, "'0000-00-00'"...)
} else {
buf = append(buf, '\'')
- buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return "", err
}
@@ -296,7 +309,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -310,28 +323,25 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
}
query = prepared
}
- mc.affectedRows = 0
- mc.insertId = 0
err := mc.exec(query)
if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
+ copied := mc.result
+ return &copied, err
}
return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
+ handleOk := mc.clearResult()
// Send command
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
return mc.markBadConn(err)
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return err
}
@@ -348,7 +358,7 @@ func (mc *mysqlConn) exec(query string) error {
}
}
- return mc.discardResults()
+ return handleOk.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
@@ -356,8 +366,10 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
}
func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ handleOk := mc.clearResult()
+
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -376,7 +388,7 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
if err == nil {
// Read Result
var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
+ resLen, err = handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -404,12 +416,13 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
// The returned byte slice is only valid until the next read
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
// Send command
+ handleOk := mc.clearResult()
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
return nil, err
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -451,7 +464,7 @@ func (mc *mysqlConn) finish() {
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return driver.ErrBadConn
}
@@ -460,11 +473,12 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
}
defer mc.finish()
+ handleOk := mc.clearResult()
if err = mc.writeCommandPacket(comPing); err != nil {
return mc.markBadConn(err)
}
- return mc.readResultOK()
+ return handleOk.readResultOK()
}
// BeginTx implements driver.ConnBeginTx interface
@@ -639,7 +653,31 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error {
if mc.closed.Load() {
return driver.ErrBadConn
}
- mc.reset = true
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.cfg.CheckConnLiveness {
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ mc.log("closing bad idle connection: ", err)
+ return driver.ErrBadConn
+ }
+ }
+
return nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection_test.go
index b6764a2..98c985a 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connection_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connection_test.go
@@ -179,6 +179,7 @@ func TestPingErrInvalidConn(t *testing.T) {
buf: newBuffer(nc),
maxAllowedPacket: defaultMaxAllowedPacket,
closech: make(chan struct{}),
+ cfg: NewConfig(),
}
err := ms.Ping(context.Background())
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector.go
index d567b4e..b670775 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector.go
@@ -12,10 +12,53 @@ import (
"context"
"database/sql/driver"
"net"
+ "os"
+ "strconv"
+ "strings"
)
type connector struct {
- cfg *Config // immutable private copy.
+ cfg *Config // immutable private copy.
+ encodedAttributes string // Encoded connection attributes.
+}
+
+func encodeConnectionAttributes(cfg *Config) string {
+ connAttrsBuf := make([]byte, 0)
+
+ // default connection attributes
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid()))
+ serverHost, _, _ := net.SplitHostPort(cfg.Addr)
+ if serverHost != "" {
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost)
+ }
+
+ // user-defined connection attributes
+ for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") {
+ k, v, found := strings.Cut(connAttr, ":")
+ if !found {
+ continue
+ }
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v)
+ }
+
+ return string(connAttrsBuf)
+}
+
+func newConnector(cfg *Config) *connector {
+ encodedAttributes := encodeConnectionAttributes(cfg)
+ return &connector{
+ cfg: cfg,
+ encodedAttributes: encodedAttributes,
+ }
}
// Connect implements driver.Connector interface.
@@ -23,12 +66,23 @@ type connector struct {
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var err error
+ // Invoke beforeConnect if present, with a copy of the configuration
+ cfg := c.cfg
+ if c.cfg.beforeConnect != nil {
+ cfg = c.cfg.Clone()
+ err = c.cfg.beforeConnect(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
- cfg: c.cfg,
+ cfg: cfg,
+ connector: c,
}
mc.parseTime = mc.cfg.ParseTime
@@ -48,18 +102,15 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
}
-
if err != nil {
return nil, err
}
+ mc.rawConn = mc.netConn
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
- // Don't send COM_QUIT before handshake.
- mc.netConn.Close()
- mc.netConn = nil
- return nil, err
+ c.cfg.Logger.Print(err)
}
}
@@ -92,7 +143,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
authResp, err := mc.auth(authData, plugin)
if err != nil {
// try the default auth plugin, if using the requested plugin failed
- errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin
authResp, err = mc.auth(authData, plugin)
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector_test.go
index 976903c..82d8c59 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/connector_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/connector_test.go
@@ -8,11 +8,11 @@ import (
)
func TestConnectorReturnsTimeout(t *testing.T) {
- connector := &connector{&Config{
+ connector := newConnector(&Config{
Net: "tcp",
Addr: "1.1.1.1:1234",
Timeout: 10 * time.Millisecond,
- }}
+ })
_, err := connector.Connect(context.Background())
if err == nil {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/const.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/const.go
index 64e2bce..22526e0 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/const.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/const.go
@@ -8,12 +8,25 @@
package mysql
+import "runtime"
+
const (
defaultAuthPlugin = "mysql_native_password"
defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
+
+ // Connection attributes
+ // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available
+ connAttrClientName = "_client_name"
+ connAttrClientNameValue = "Go-MySQL-Driver"
+ connAttrOS = "_os"
+ connAttrOSValue = runtime.GOOS
+ connAttrPlatform = "_platform"
+ connAttrPlatformValue = runtime.GOARCH
+ connAttrPid = "_pid"
+ connAttrServerHost = "_server_host"
)
// MySQL constants documentation:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver.go
index ad7aec2..105316b 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver.go
@@ -55,6 +55,15 @@ func RegisterDialContext(net string, dial DialContextFunc) {
dials[net] = dial
}
+// DeregisterDialContext removes the custom dial function registered with the given net.
+func DeregisterDialContext(net string) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials != nil {
+ delete(dials, net)
+ }
+}
+
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
@@ -74,14 +83,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
if err != nil {
return nil, err
}
- c := &connector{
- cfg: cfg,
- }
+ c := newConnector(cfg)
return c.Connect(context.Background())
}
+// This variable can be replaced with -ldflags like below:
+// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
+var driverName = "mysql"
+
func init() {
- sql.Register("mysql", &MySQLDriver{})
+ if driverName != "" {
+ sql.Register(driverName, &MySQLDriver{})
+ }
}
// NewConnector returns new driver.Connector.
@@ -92,7 +105,7 @@ func NewConnector(cfg *Config) (driver.Connector, error) {
if err := cfg.normalize(); err != nil {
return nil, err
}
- return &connector{cfg: cfg}, nil
+ return newConnector(cfg), nil
}
// OpenConnector implements driver.DriverContext.
@@ -101,7 +114,5 @@ func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
if err != nil {
return nil, err
}
- return &connector{
- cfg: cfg,
- }, nil
+ return newConnector(cfg), nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver_test.go
index a1c7767..4fd196d 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/driver_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/driver_test.go
@@ -11,20 +11,22 @@ package mysql
import (
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"log"
"math"
+ mrand "math/rand"
"net"
"net/url"
"os"
"reflect"
"runtime"
+ "strconv"
"strings"
"sync"
"sync/atomic"
@@ -32,6 +34,16 @@ import (
"time"
)
+// This variable can be replaced with -ldflags like below:
+// go test "-ldflags=-X github.com/go-sql-driver/mysql.driverNameTest=custom"
+var driverNameTest string
+
+func init() {
+ if driverNameTest == "" {
+ driverNameTest = driverName
+ }
+}
+
// Ensure that all the driver interfaces are implemented
var (
_ driver.Rows = &binaryRows{}
@@ -83,7 +95,7 @@ func init() {
}
type DBTest struct {
- *testing.T
+ testing.TB
db *sql.DB
}
@@ -112,12 +124,14 @@ func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBT
dsn += "&multiStatements=true"
var db *sql.DB
if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
- db, err = sql.Open("mysql", dsn)
+ db, err = sql.Open(driverNameTest, dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
}
+ // Previous test may be skipped without dropping the test table
+ db.Exec("DROP TABLE IF EXISTS test")
dbt := &DBTest{t, db}
for _, test := range tests {
@@ -131,59 +145,111 @@ func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
t.Skipf("MySQL server not running on %s", netAddr)
}
- db, err := sql.Open("mysql", dsn)
+ db, err := sql.Open(driverNameTest, dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
- db.Exec("DROP TABLE IF EXISTS test")
+ cleanup := func() {
+ db.Exec("DROP TABLE IF EXISTS test")
+ }
dsn2 := dsn + "&interpolateParams=true"
var db2 *sql.DB
if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
- db2, err = sql.Open("mysql", dsn2)
+ db2, err = sql.Open(driverNameTest, dsn2)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db2.Close()
}
- dsn3 := dsn + "&multiStatements=true"
- var db3 *sql.DB
- if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
- db3, err = sql.Open("mysql", dsn3)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
+ for _, test := range tests {
+ test := test
+ t.Run("default", func(t *testing.T) {
+ dbt := &DBTest{t, db}
+ t.Cleanup(cleanup)
+ test(dbt)
+ })
+ if db2 != nil {
+ t.Run("interpolateParams", func(t *testing.T) {
+ dbt2 := &DBTest{t, db2}
+ t.Cleanup(cleanup)
+ test(dbt2)
+ })
}
- defer db3.Close()
}
+}
- dbt := &DBTest{t, db}
- dbt2 := &DBTest{t, db2}
- dbt3 := &DBTest{t, db3}
- for _, test := range tests {
- test(dbt)
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- if db2 != nil {
- test(dbt2)
- dbt2.db.Exec("DROP TABLE IF EXISTS test")
+// runTestsParallel runs the tests in parallel with a separate database connection for each test.
+func runTestsParallel(t *testing.T, dsn string, tests ...func(dbt *DBTest, tableName string)) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ newTableName := func(t *testing.T) string {
+ t.Helper()
+ var buf [8]byte
+ if _, err := rand.Read(buf[:]); err != nil {
+ t.Fatal(err)
}
- if db3 != nil {
- test(dbt3)
- dbt3.db.Exec("DROP TABLE IF EXISTS test")
+ return fmt.Sprintf("test_%x", buf[:])
+ }
+
+ t.Parallel()
+ for _, test := range tests {
+ test := test
+
+ t.Run("default", func(t *testing.T) {
+ t.Parallel()
+
+ tableName := newTableName(t)
+ db, err := sql.Open("mysql", dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ t.Cleanup(func() {
+ db.Exec("DROP TABLE IF EXISTS " + tableName)
+ db.Close()
+ })
+
+ dbt := &DBTest{t, db}
+ test(dbt, tableName)
+ })
+
+ dsn2 := dsn + "&interpolateParams=true"
+ if _, err := ParseDSN(dsn2); err == errInvalidDSNUnsafeCollation {
+ t.Run("interpolateParams", func(t *testing.T) {
+ t.Parallel()
+
+ tableName := newTableName(t)
+ db, err := sql.Open("mysql", dsn2)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ t.Cleanup(func() {
+ db.Exec("DROP TABLE IF EXISTS " + tableName)
+ db.Close()
+ })
+
+ dbt := &DBTest{t, db}
+ test(dbt, tableName)
+ })
}
}
}
func (dbt *DBTest) fail(method, query string, err error) {
+ dbt.Helper()
if len(query) > 300 {
query = "[query too large to print]"
}
dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
}
-func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
+func (dbt *DBTest) mustExec(query string, args ...any) (res sql.Result) {
+ dbt.Helper()
res, err := dbt.db.Exec(query, args...)
if err != nil {
dbt.fail("exec", query, err)
@@ -191,7 +257,8 @@ func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result)
return res
}
-func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
+func (dbt *DBTest) mustQuery(query string, args ...any) (rows *sql.Rows) {
+ dbt.Helper()
rows, err := dbt.db.Query(query, args...)
if err != nil {
dbt.fail("query", query, err)
@@ -211,7 +278,7 @@ func maybeSkip(t *testing.T, err error, skipErrno uint16) {
}
func TestEmptyQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
// just a comment, no query
rows := dbt.mustQuery("--")
defer rows.Close()
@@ -223,20 +290,20 @@ func TestEmptyQuery(t *testing.T) {
}
func TestCRUD(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
// Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value BOOL)")
// Test for unexpected data
var out bool
- rows := dbt.mustQuery("SELECT * FROM test")
+ rows := dbt.mustQuery("SELECT * FROM " + tbl)
if rows.Next() {
dbt.Error("unexpected data in empty table")
}
rows.Close()
// Create Data
- res := dbt.mustExec("INSERT INTO test VALUES (1)")
+ res := dbt.mustExec("INSERT INTO " + tbl + " VALUES (1)")
count, err := res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -254,7 +321,7 @@ func TestCRUD(t *testing.T) {
}
// Read
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if true != out {
@@ -270,7 +337,7 @@ func TestCRUD(t *testing.T) {
rows.Close()
// Update
- res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
+ res = dbt.mustExec("UPDATE "+tbl+" SET value = ? WHERE value = ?", false, true)
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -280,7 +347,7 @@ func TestCRUD(t *testing.T) {
}
// Check Update
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if false != out {
@@ -296,7 +363,7 @@ func TestCRUD(t *testing.T) {
rows.Close()
// Delete
- res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
+ res = dbt.mustExec("DELETE FROM "+tbl+" WHERE value = ?", false)
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -306,7 +373,7 @@ func TestCRUD(t *testing.T) {
}
// Check for unexpected rows
- res = dbt.mustExec("DELETE FROM test")
+ res = dbt.mustExec("DELETE FROM " + tbl)
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -317,6 +384,51 @@ func TestCRUD(t *testing.T) {
})
}
+// TestNumbers test that selecting numeric columns.
+// Both of textRows and binaryRows should return same type and value.
+func TestNumbersToAny(t *testing.T) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (id INT PRIMARY KEY, b BOOL, i8 TINYINT, " +
+ "i16 SMALLINT, i32 INT, i64 BIGINT, f32 FLOAT, f64 DOUBLE, iu32 INT UNSIGNED)")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (1, true, 127, 32767, 2147483647, 9223372036854775807, 1.25, 2.5, 4294967295)")
+
+ // Use binaryRows for interpolateParams=false and textRows for interpolateParams=true.
+ rows := dbt.mustQuery("SELECT b, i8, i16, i32, i64, f32, f64, iu32 FROM "+tbl+" WHERE id=?", 1)
+ if !rows.Next() {
+ dbt.Fatal("no data")
+ }
+ var b, i8, i16, i32, i64, f32, f64, iu32 any
+ err := rows.Scan(&b, &i8, &i16, &i32, &i64, &f32, &f64, &iu32)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ if b.(int64) != 1 {
+ dbt.Errorf("b != 1")
+ }
+ if i8.(int64) != 127 {
+ dbt.Errorf("i8 != 127")
+ }
+ if i16.(int64) != 32767 {
+ dbt.Errorf("i16 != 32767")
+ }
+ if i32.(int64) != 2147483647 {
+ dbt.Errorf("i32 != 2147483647")
+ }
+ if i64.(int64) != 9223372036854775807 {
+ dbt.Errorf("i64 != 9223372036854775807")
+ }
+ if f32.(float32) != 1.25 {
+ dbt.Errorf("f32 != 1.25")
+ }
+ if f64.(float64) != 2.5 {
+ dbt.Errorf("f64 != 2.5")
+ }
+ if iu32.(int64) != 4294967295 {
+ dbt.Errorf("iu32 != 4294967295")
+ }
+ })
+}
+
func TestMultiQuery(t *testing.T) {
runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
// Create Table
@@ -347,8 +459,8 @@ func TestMultiQuery(t *testing.T) {
rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
if rows.Next() {
rows.Scan(&out)
- if 5 != out {
- dbt.Errorf("5 != %d", out)
+ if out != 5 {
+ dbt.Errorf("expected 5, got %d", out)
}
if rows.Next() {
@@ -363,7 +475,7 @@ func TestMultiQuery(t *testing.T) {
}
func TestInt(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
in := int64(42)
var out int64
@@ -371,11 +483,11 @@ func TestInt(t *testing.T) {
// SIGNED
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -386,16 +498,16 @@ func TestInt(t *testing.T) {
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
// UNSIGNED ZEROFILL
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + " ZEROFILL)")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -406,21 +518,21 @@ func TestInt(t *testing.T) {
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestFloat32(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [2]string{"FLOAT", "DOUBLE"}
in := float32(42.23)
var out float32
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ")")
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -430,21 +542,21 @@ func TestFloat32(t *testing.T) {
dbt.Errorf("%s: no data", v)
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestFloat64(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [2]string{"FLOAT", "DOUBLE"}
var expected float64 = 42.23
var out float64
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (42.23)")
- rows = dbt.mustQuery("SELECT value FROM test")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ")")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (42.23)")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if expected != out {
@@ -454,21 +566,21 @@ func TestFloat64(t *testing.T) {
dbt.Errorf("%s: no data", v)
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestFloat64Placeholder(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [2]string{"FLOAT", "DOUBLE"}
var expected float64 = 42.23
var out float64
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
- rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
+ dbt.mustExec("CREATE TABLE " + tbl + " (id int, value " + v + ")")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (1, 42.23)")
+ rows = dbt.mustQuery("SELECT value FROM "+tbl+" WHERE id = ?", 1)
if rows.Next() {
rows.Scan(&out)
if expected != out {
@@ -478,24 +590,24 @@ func TestFloat64Placeholder(t *testing.T) {
dbt.Errorf("%s: no data", v)
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
})
}
func TestString(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย"
var out string
var rows *sql.Rows
for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value " + v + ") CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in != out {
@@ -506,11 +618,11 @@ func TestString(t *testing.T) {
}
rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
}
// BLOB
- dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
+ dbt.mustExec("CREATE TABLE " + tbl + " (id int, value BLOB) CHARACTER SET utf8")
id := 2
in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
@@ -521,9 +633,9 @@ func TestString(t *testing.T) {
"sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
"sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
"Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
- dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?, ?)", id, in)
- err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
+ err := dbt.db.QueryRow("SELECT value FROM "+tbl+" WHERE id = ?", id).Scan(&out)
if err != nil {
dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
} else if out != in {
@@ -533,7 +645,7 @@ func TestString(t *testing.T) {
}
func TestRawBytes(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
v1 := []byte("aaa")
v2 := []byte("bbb")
rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
@@ -562,7 +674,7 @@ func TestRawBytes(t *testing.T) {
}
func TestRawMessage(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
v1 := json.RawMessage("{}")
v2 := json.RawMessage("[]")
rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
@@ -593,14 +705,14 @@ func (tv testValuer) Value() (driver.Value, error) {
}
func TestValuer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
in := testValuer{"a_value"}
var out string
var rows *sql.Rows
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
+ dbt.mustExec("CREATE TABLE " + tbl + " (value VARCHAR(255)) CHARACTER SET utf8")
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
if rows.Next() {
rows.Scan(&out)
if in.value != out {
@@ -610,8 +722,6 @@ func TestValuer(t *testing.T) {
dbt.Errorf("Valuer: no data")
}
rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
})
}
@@ -628,15 +738,15 @@ func (tv testValuerWithValidation) Value() (driver.Value, error) {
}
func TestValuerWithValidation(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
in := testValuerWithValidation{"a_value"}
var out string
var rows *sql.Rows
- dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
+ dbt.mustExec("CREATE TABLE " + tbl + " (value VARCHAR(255)) CHARACTER SET utf8")
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM testValuer")
+ rows = dbt.mustQuery("SELECT value FROM " + tbl)
defer rows.Close()
if rows.Next() {
@@ -648,19 +758,17 @@ func TestValuerWithValidation(t *testing.T) {
dbt.Errorf("Valuer: no data")
}
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
+ if _, err := dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", testValuerWithValidation{""}); err == nil {
dbt.Errorf("Failed to check valuer error")
}
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
+ if _, err := dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", nil); err != nil {
dbt.Errorf("Failed to check nil")
}
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
+ if _, err := dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", map[string]bool{}); err == nil {
dbt.Errorf("Failed to check not valuer")
}
-
- dbt.mustExec("DROP TABLE IF EXISTS testValuer")
})
}
@@ -737,7 +845,7 @@ func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
return
}
- var dst interface{}
+ var dst any
err = rows.Scan(&dst)
if err != nil {
dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
@@ -768,7 +876,7 @@ func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
t.s, val.Format(tlayout),
)
default:
- fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
+ fmt.Printf("%#v\n", []any{dbtype, tlayout, mode, t.s, t.t})
dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
dbtype, mode,
val, val,
@@ -894,7 +1002,7 @@ func TestTimestampMicros(t *testing.T) {
f0 := format[:19]
f1 := format[:21]
f6 := format[:26]
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
// check if microseconds are supported.
// Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
// and not precision.
@@ -909,7 +1017,7 @@ func TestTimestampMicros(t *testing.T) {
return
}
_, err := dbt.db.Exec(`
- CREATE TABLE test (
+ CREATE TABLE ` + tbl + ` (
value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
@@ -918,10 +1026,10 @@ func TestTimestampMicros(t *testing.T) {
if err != nil {
dbt.Error(err)
}
- defer dbt.mustExec("DROP TABLE IF EXISTS test")
- dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
+ defer dbt.mustExec("DROP TABLE IF EXISTS " + tbl)
+ dbt.mustExec("INSERT INTO "+tbl+" SET value0=?, value1=?, value6=?", f0, f1, f6)
var res0, res1, res6 string
- rows := dbt.mustQuery("SELECT * FROM test")
+ rows := dbt.mustQuery("SELECT * FROM " + tbl)
defer rows.Close()
if !rows.Next() {
dbt.Errorf("test contained no selectable values")
@@ -943,7 +1051,7 @@ func TestTimestampMicros(t *testing.T) {
}
func TestNULL(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
nullStmt, err := dbt.db.Prepare("SELECT NULL")
if err != nil {
dbt.Fatal(err)
@@ -1075,12 +1183,12 @@ func TestNULL(t *testing.T) {
}
// Insert NULL
- dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)")
+ dbt.mustExec("CREATE TABLE " + tbl + " (dummmy1 int, value int, dummy2 int)")
- dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
+ dbt.mustExec("INSERT INTO "+tbl+" VALUES (?, ?, ?)", 1, nil, 2)
- var out interface{}
- rows := dbt.mustQuery("SELECT * FROM test")
+ var out any
+ rows := dbt.mustQuery("SELECT * FROM " + tbl)
defer rows.Close()
if rows.Next() {
rows.Scan(&out)
@@ -1104,7 +1212,7 @@ func TestUint64(t *testing.T) {
shigh = int64(uhigh)
stop = ^shigh
)
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
if err != nil {
dbt.Fatal(err)
@@ -1168,7 +1276,7 @@ func TestLongData(t *testing.T) {
dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
}
if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
+ dbt.Error("LONGBLOB: unexpected row")
}
} else {
dbt.Fatalf("LONGBLOB: no data")
@@ -1187,7 +1295,7 @@ func TestLongData(t *testing.T) {
dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
}
if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
+ dbt.Error("LONGBLOB: unexpected row")
}
} else {
if err = rows.Err(); err != nil {
@@ -1245,7 +1353,7 @@ func TestLoadData(t *testing.T) {
dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
// Local File
- file, err := ioutil.TempFile("", "gotest")
+ file, err := os.CreateTemp("", "gotest")
defer os.Remove(file.Name())
if err != nil {
dbt.Fatal(err)
@@ -1263,7 +1371,7 @@ func TestLoadData(t *testing.T) {
dbt.Fatalf("unexpected row count: got %d, want 0", count)
}
- // Then fille File with data and try to load it
+ // Then fill File with data and try to load it
file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
file.Close()
dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
@@ -1294,18 +1402,18 @@ func TestLoadData(t *testing.T) {
_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
if err == nil {
dbt.Fatal("load non-existent Reader didn't fail")
- } else if err.Error() != "Reader 'doesnotexist' is not registered" {
+ } else if err.Error() != "reader 'doesnotexist' is not registered" {
dbt.Fatal(err.Error())
}
})
}
-func TestFoundRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+func TestFoundRows1(t *testing.T) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (id INT NOT NULL ,data INT NOT NULL)")
+ dbt.mustExec("INSERT INTO " + tbl + " (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+ res := dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 0")
count, err := res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1313,7 +1421,7 @@ func TestFoundRows(t *testing.T) {
if count != 2 {
dbt.Fatalf("Expected 2 affected rows, got %d", count)
}
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+ res = dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 1")
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1322,11 +1430,14 @@ func TestFoundRows(t *testing.T) {
dbt.Fatalf("Expected 2 affected rows, got %d", count)
}
})
- runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
+}
+
+func TestFoundRows2(t *testing.T) {
+ runTestsParallel(t, dsn+"&clientFoundRows=true", func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (id INT NOT NULL ,data INT NOT NULL)")
+ dbt.mustExec("INSERT INTO " + tbl + " (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
+ res := dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 0")
count, err := res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1334,7 +1445,7 @@ func TestFoundRows(t *testing.T) {
if count != 2 {
dbt.Fatalf("Expected 2 matched rows, got %d", count)
}
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
+ res = dbt.mustExec("UPDATE " + tbl + " SET data = 1 WHERE id = 1")
count, err = res.RowsAffected()
if err != nil {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
@@ -1402,6 +1513,7 @@ func TestReuseClosedConnection(t *testing.T) {
if err != nil {
t.Fatalf("error preparing statement: %s", err.Error())
}
+ //lint:ignore SA1019 this is a test
_, err = stmt.Exec(nil)
if err != nil {
t.Fatalf("error executing statement: %s", err.Error())
@@ -1416,6 +1528,7 @@ func TestReuseClosedConnection(t *testing.T) {
t.Errorf("panic after reusing a closed connection: %v", err)
}
}()
+ //lint:ignore SA1019 this is a test
_, err = stmt.Exec(nil)
if err != nil && err != driver.ErrBadConn {
t.Errorf("unexpected error '%s', expected '%s'",
@@ -1458,7 +1571,7 @@ func TestCharset(t *testing.T) {
}
func TestFailingCharset(t *testing.T) {
- runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
+ runTestsParallel(t, dsn+"&charset=none", func(dbt *DBTest, _ string) {
// run query to really establish connection...
_, err := dbt.db.Exec("SELECT 1")
if err == nil {
@@ -1507,7 +1620,7 @@ func TestCollation(t *testing.T) {
}
func TestColumnsWithAlias(t *testing.T) {
- runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
+ runTestsParallel(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest, _ string) {
rows := dbt.mustQuery("SELECT 1 AS A")
defer rows.Close()
cols, _ := rows.Columns()
@@ -1531,7 +1644,7 @@ func TestColumnsWithAlias(t *testing.T) {
}
func TestRawBytesResultExceedsBuffer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
// defaultBufSize from buffer.go
expected := strings.Repeat("abc", defaultBufSize)
@@ -1590,7 +1703,7 @@ func TestTimezoneConversion(t *testing.T) {
// Special cases
func TestRowsClose(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
rows, err := dbt.db.Query("SELECT 1")
if err != nil {
dbt.Fatal(err)
@@ -1615,7 +1728,7 @@ func TestRowsClose(t *testing.T) {
// dangling statements
// http://code.google.com/p/go/issues/detail?id=3865
func TestCloseStmtBeforeRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
stmt, err := dbt.db.Prepare("SELECT 1")
if err != nil {
dbt.Fatal(err)
@@ -1656,7 +1769,7 @@ func TestCloseStmtBeforeRows(t *testing.T) {
// It is valid to have multiple Rows for the same Stmt
// http://code.google.com/p/go/issues/detail?id=3734
func TestStmtMultiRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
if err != nil {
dbt.Fatal(err)
@@ -1782,7 +1895,7 @@ func TestPreparedManyCols(t *testing.T) {
// create more parameters than fit into the buffer
// which will take nil-values
- params := make([]interface{}, numParams)
+ params := make([]any, numParams)
rows, err := stmt.Query(params...)
if err != nil {
dbt.Fatal(err)
@@ -1807,13 +1920,13 @@ func TestConcurrent(t *testing.T) {
}
runTests(t, dsn, func(dbt *DBTest) {
- var version string
- if err := dbt.db.QueryRow("SELECT @@version").Scan(&version); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if strings.Contains(strings.ToLower(version), "mariadb") {
- t.Skip(`TODO: "fix commands out of sync. Did you run multiple statements at once?" on MariaDB`)
- }
+ // var version string
+ // if err := dbt.db.QueryRow("SELECT @@version").Scan(&version); err != nil {
+ // dbt.Fatal(err)
+ // }
+ // if strings.Contains(strings.ToLower(version), "mariadb") {
+ // t.Skip(`TODO: "fix commands out of sync. Did you run multiple statements at once?" on MariaDB`)
+ // }
var max int
err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
@@ -1829,7 +1942,7 @@ func TestConcurrent(t *testing.T) {
var fatalError string
var once sync.Once
- fatalf := func(s string, vals ...interface{}) {
+ fatalf := func(s string, vals ...any) {
once.Do(func() {
fatalError = fmt.Sprintf(s, vals...)
})
@@ -1840,7 +1953,6 @@ func TestConcurrent(t *testing.T) {
defer wg.Done()
tx, err := dbt.db.Begin()
- atomic.AddInt32(&remaining, -1)
if err != nil {
if err.Error() != "Error 1040: Too many connections" {
@@ -1850,7 +1962,7 @@ func TestConcurrent(t *testing.T) {
}
// keep the connection busy until all connections are open
- for remaining > 0 {
+ for atomic.AddInt32(&remaining, -1) > 0 {
if _, err = tx.Exec("DO 1"); err != nil {
fatalf("error on conn %d: %s", id, err.Error())
return
@@ -1867,7 +1979,7 @@ func TestConcurrent(t *testing.T) {
}(i)
}
- // wait until all conections are open
+ // wait until all connections are open
wg.Wait()
if fatalError != "" {
@@ -1883,7 +1995,7 @@ func testDialError(t *testing.T, dialErr error, expectErr error) {
return nil, dialErr
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
+ db, err := sql.Open(driverNameTest, fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -1916,13 +2028,13 @@ func TestCustomDial(t *testing.T) {
t.Skipf("MySQL server not running on %s", netAddr)
}
- // our custom dial function which justs wraps net.Dial here
+ // our custom dial function which just wraps net.Dial here
RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
var d net.Dialer
return d.DialContext(ctx, prot, addr)
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
+ db, err := sql.Open(driverNameTest, fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -1933,6 +2045,40 @@ func TestCustomDial(t *testing.T) {
}
}
+func TestBeforeConnect(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ // dbname is set in the BeforeConnect handle
+ cfg, err := ParseDSN(fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, "_"))
+ if err != nil {
+ t.Fatalf("error parsing DSN: %v", err)
+ }
+
+ cfg.Apply(BeforeConnect(func(ctx context.Context, c *Config) error {
+ c.DBName = dbname
+ return nil
+ }))
+
+ connector, err := NewConnector(cfg)
+ if err != nil {
+ t.Fatalf("error creating connector: %v", err)
+ }
+
+ db := sql.OpenDB(connector)
+ defer db.Close()
+
+ var connectedDb string
+ err = db.QueryRow("SELECT DATABASE();").Scan(&connectedDb)
+ if err != nil {
+ t.Fatalf("error executing query: %v", err)
+ }
+ if connectedDb != dbname {
+ t.Fatalf("expected to connect to DB %s, but connected to %s instead", dbname, connectedDb)
+ }
+}
+
func TestSQLInjection(t *testing.T) {
createTest := func(arg string) func(dbt *DBTest) {
return func(dbt *DBTest) {
@@ -1995,7 +2141,7 @@ func TestInsertRetrieveEscapedData(t *testing.T) {
func TestUnixSocketAuthFail(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
// Save the current logger so we can restore it.
- oldLogger := errLog
+ oldLogger := defaultLogger
// Set a new logger so we can capture its output.
buffer := bytes.NewBuffer(make([]byte, 0, 64))
@@ -2020,7 +2166,7 @@ func TestUnixSocketAuthFail(t *testing.T) {
}
t.Logf("socket: %s", socket)
badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
- db, err := sql.Open("mysql", badDSN)
+ db, err := sql.Open(driverNameTest, badDSN)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -2155,11 +2301,51 @@ func TestRejectReadOnly(t *testing.T) {
}
func TestPing(t *testing.T) {
+ ctx := context.Background()
runTests(t, dsn, func(dbt *DBTest) {
if err := dbt.db.Ping(); err != nil {
dbt.fail("Ping", "Ping", err)
}
})
+
+ runTests(t, dsn, func(dbt *DBTest) {
+ conn, err := dbt.db.Conn(ctx)
+ if err != nil {
+ dbt.fail("db", "Conn", err)
+ }
+
+ // Check that affectedRows and insertIds are cleared after each call.
+ conn.Raw(func(conn any) error {
+ c := conn.(*mysqlConn)
+
+ // Issue a query that sets affectedRows and insertIds.
+ q, err := c.Query(`SELECT 1`, nil)
+ if err != nil {
+ dbt.fail("Conn", "Query", err)
+ }
+ if got, want := c.result.affectedRows, []int64{0}; !reflect.DeepEqual(got, want) {
+ dbt.Fatalf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ if got, want := c.result.insertIds, []int64{0}; !reflect.DeepEqual(got, want) {
+ dbt.Fatalf("bad insertIds: got %v, want=%v", got, want)
+ }
+ q.Close()
+
+ // Verify that Ping() clears both fields.
+ for i := 0; i < 2; i++ {
+ if err := c.Ping(ctx); err != nil {
+ dbt.fail("Pinger", "Ping", err)
+ }
+ if got, want := c.result.affectedRows, []int64(nil); !reflect.DeepEqual(got, want) {
+ t.Errorf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ if got, want := c.result.insertIds, []int64(nil); !reflect.DeepEqual(got, want) {
+ t.Errorf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ }
+ return nil
+ })
+ })
}
// See Issue #799
@@ -2169,7 +2355,7 @@ func TestEmptyPassword(t *testing.T) {
}
dsn := fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, "", netAddr, dbname)
- db, err := sql.Open("mysql", dsn)
+ db, err := sql.Open(driverNameTest, dsn)
if err == nil {
defer db.Close()
err = db.Ping()
@@ -2379,10 +2565,47 @@ func TestMultiResultSetNoSelect(t *testing.T) {
})
}
+func TestExecMultipleResults(t *testing.T) {
+ ctx := context.Background()
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec(`
+ CREATE TABLE test (
+ id INT NOT NULL AUTO_INCREMENT,
+ value VARCHAR(255),
+ PRIMARY KEY (id)
+ )`)
+ conn, err := dbt.db.Conn(ctx)
+ if err != nil {
+ t.Fatalf("failed to connect: %v", err)
+ }
+ conn.Raw(func(conn any) error {
+ //lint:ignore SA1019 this is a test
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ INSERT INTO test (value) VALUES ('a'), ('b');
+ INSERT INTO test (value) VALUES ('c'), ('d'), ('e');
+ `, nil)
+ if err != nil {
+ t.Fatalf("insert statements failed: %v", err)
+ }
+ mres := res.(Result)
+ if got, want := mres.AllRowsAffected(), []int64{2, 3}; !reflect.DeepEqual(got, want) {
+ t.Errorf("bad AllRowsAffected: got %v, want=%v", got, want)
+ }
+ // For INSERTs containing multiple rows, LAST_INSERT_ID() returns the
+ // first inserted ID, not the last.
+ if got, want := mres.AllLastInsertIds(), []int64{1, 3}; !reflect.DeepEqual(got, want) {
+ t.Errorf("bad AllLastInsertIds: got %v, want %v", got, want)
+ }
+ return nil
+ })
+ })
+}
+
// tests if rows are set in a proper state if some results were ignored before
// calling rows.NextResultSet.
func TestSkipResults(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
rows := dbt.mustQuery("SELECT 1, 2")
defer rows.Close()
@@ -2400,8 +2623,44 @@ func TestSkipResults(t *testing.T) {
})
}
+func TestQueryMultipleResults(t *testing.T) {
+ ctx := context.Background()
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec(`
+ CREATE TABLE test (
+ id INT NOT NULL AUTO_INCREMENT,
+ value VARCHAR(255),
+ PRIMARY KEY (id)
+ )`)
+ conn, err := dbt.db.Conn(ctx)
+ if err != nil {
+ t.Fatalf("failed to connect: %v", err)
+ }
+ conn.Raw(func(conn any) error {
+ //lint:ignore SA1019 this is a test
+ qr := conn.(driver.Queryer)
+ c := conn.(*mysqlConn)
+
+ // Demonstrate that repeated queries reset the affectedRows
+ for i := 0; i < 2; i++ {
+ _, err := qr.Query(`
+ INSERT INTO test (value) VALUES ('a'), ('b');
+ INSERT INTO test (value) VALUES ('c'), ('d'), ('e');
+ `, nil)
+ if err != nil {
+ t.Fatalf("insert statements failed: %v", err)
+ }
+ if got, want := c.result.affectedRows, []int64{2, 3}; !reflect.DeepEqual(got, want) {
+ t.Errorf("bad affectedRows: got %v, want=%v", got, want)
+ }
+ }
+ return nil
+ })
+ })
+}
+
func TestPingContext(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
if err := dbt.db.PingContext(ctx); err != context.Canceled {
@@ -2411,8 +2670,8 @@ func TestPingContext(t *testing.T) {
}
func TestContextCancelExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
@@ -2420,7 +2679,7 @@ func TestContextCancelExec(t *testing.T) {
// This query will be canceled.
startTime := time.Now()
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
@@ -2432,7 +2691,7 @@ func TestContextCancelExec(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2440,14 +2699,14 @@ func TestContextCancelExec(t *testing.T) {
}
// Context is already canceled, so error should come before execution.
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (1)"); err == nil {
dbt.Error("expected error")
} else if err.Error() != "context canceled" {
dbt.Fatalf("unexpected error: %s", err)
}
// The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 {
@@ -2457,8 +2716,8 @@ func TestContextCancelExec(t *testing.T) {
}
func TestContextCancelQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
@@ -2466,7 +2725,7 @@ func TestContextCancelQuery(t *testing.T) {
// This query will be canceled.
startTime := time.Now()
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
@@ -2478,7 +2737,7 @@ func TestContextCancelQuery(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2486,12 +2745,12 @@ func TestContextCancelQuery(t *testing.T) {
}
// Context is already canceled, so error should come before execution.
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO "+tbl+" VALUES (1)"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
// The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 {
@@ -2501,12 +2760,12 @@ func TestContextCancelQuery(t *testing.T) {
}
func TestContextCancelQueryRow(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
+ dbt.mustExec("INSERT INTO " + tbl + " VALUES (1), (2), (3)")
ctx, cancel := context.WithCancel(context.Background())
- rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
+ rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM "+tbl)
if err != nil {
dbt.Fatalf("%s", err.Error())
}
@@ -2534,7 +2793,7 @@ func TestContextCancelQueryRow(t *testing.T) {
}
func TestContextCancelPrepare(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTestsParallel(t, dsn, func(dbt *DBTest, _ string) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
@@ -2544,10 +2803,10 @@ func TestContextCancelPrepare(t *testing.T) {
}
func TestContextCancelStmtExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))")
if err != nil {
dbt.Fatalf("unexpected error: %v", err)
}
@@ -2569,7 +2828,7 @@ func TestContextCancelStmtExec(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2579,10 +2838,10 @@ func TestContextCancelStmtExec(t *testing.T) {
}
func TestContextCancelStmtQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))")
if err != nil {
dbt.Fatalf("unexpected error: %v", err)
}
@@ -2604,7 +2863,7 @@ func TestContextCancelStmtQuery(t *testing.T) {
// Check how many times the query is executed.
var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM " + tbl).Scan(&v); err != nil {
dbt.Fatalf("%s", err.Error())
}
if v != 1 { // TODO: need to kill the query, and v should be 0.
@@ -2618,8 +2877,8 @@ func TestContextCancelBegin(t *testing.T) {
t.Skip(`FIXME: it sometime fails with "expected driver.ErrBadConn, got sql: connection is already closed" on windows and macOS`)
}
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
conn, err := dbt.db.Conn(ctx)
if err != nil {
@@ -2636,7 +2895,7 @@ func TestContextCancelBegin(t *testing.T) {
// This query will be canceled.
startTime := time.Now()
- if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ if _, err := tx.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (SLEEP(1))"); err != context.Canceled {
dbt.Errorf("expected context.Canceled, got %v", err)
}
if d := time.Since(startTime); d > 500*time.Millisecond {
@@ -2674,8 +2933,8 @@ func TestContextCancelBegin(t *testing.T) {
}
func TestContextBeginIsolationLevel(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -2693,13 +2952,13 @@ func TestContextBeginIsolationLevel(t *testing.T) {
dbt.Fatal(err)
}
- _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+ _, err = tx1.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (1)")
if err != nil {
dbt.Fatal(err)
}
var v int
- row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM "+tbl)
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
@@ -2713,7 +2972,7 @@ func TestContextBeginIsolationLevel(t *testing.T) {
dbt.Fatal(err)
}
- row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM "+tbl)
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
@@ -2726,8 +2985,8 @@ func TestContextBeginIsolationLevel(t *testing.T) {
}
func TestContextBeginReadOnly(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (v INTEGER)")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -2742,14 +3001,14 @@ func TestContextBeginReadOnly(t *testing.T) {
}
// INSERT queries fail in a READ ONLY transaction.
- _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+ _, err = tx.ExecContext(ctx, "INSERT INTO "+tbl+" VALUES (1)")
if _, ok := err.(*MySQLError); !ok {
dbt.Errorf("expected MySQLError, got %v", err)
}
// SELECT queries can be executed.
var v int
- row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM "+tbl)
if err := row.Scan(&v); err != nil {
dbt.Fatal(err)
}
@@ -2778,13 +3037,18 @@ func TestRowsColumnTypes(t *testing.T) {
nd1 := sql.NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
nd2 := sql.NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
ndNULL := sql.NullTime{Time: time.Time{}, Valid: false}
- rbNULL := sql.RawBytes(nil)
- rb0 := sql.RawBytes("0")
- rb42 := sql.RawBytes("42")
- rbTest := sql.RawBytes("Test")
- rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
- rbx0 := sql.RawBytes("\x00")
- rbx42 := sql.RawBytes("\x42")
+ bNULL := []byte(nil)
+ nsNULL := sql.NullString{String: "", Valid: false}
+ // Helper function to build NullString from string literal.
+ ns := func(s string) sql.NullString { return sql.NullString{String: s, Valid: true} }
+ ns0 := ns("0")
+ b0 := []byte("0")
+ b42 := []byte("42")
+ nsTest := ns("Test")
+ bTest := []byte("Test")
+ b0pad4 := []byte("0\x00\x00\x00") // BINARY right-pads values with 0x00
+ bx0 := []byte("\x00")
+ bx42 := []byte("\x42")
var columns = []struct {
name string
@@ -2795,51 +3059,54 @@ func TestRowsColumnTypes(t *testing.T) {
precision int64 // 0 if not ok
scale int64
valuesIn [3]string
- valuesOut [3]interface{}
+ valuesOut [3]any
}{
- {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
- {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
- {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
- {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
- {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
- {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
- {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
- {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
- {"tinyuint", "TINYINT UNSIGNED NOT NULL", "UNSIGNED TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
- {"smalluint", "SMALLINT UNSIGNED NOT NULL", "UNSIGNED SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
- {"biguint", "BIGINT UNSIGNED NOT NULL", "UNSIGNED BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
- {"uint13", "INT(13) UNSIGNED NOT NULL", "UNSIGNED INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
- {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
- {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
- {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
- {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
- {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
- {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
- {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
- {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
- {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
- {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
- {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
- {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
- {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
- {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
+ {"bit8null", "BIT(8)", "BIT", scanTypeBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]any{bx0, bNULL, bx42}},
+ {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]any{niNULL, ni1, ni0}},
+ {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]any{int8(1), int8(0), int8(0)}},
+ {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]any{ni0, niNULL, ni42}},
+ {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]any{int16(0), int16(-32768), int16(32767)}},
+ {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]any{ni0, niNULL, ni42}},
+ {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]any{ni0, niNULL, ni42}},
+ {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]any{int32(0), int32(-1337), int32(42)}},
+ {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]any{ni0, ni42, niNULL}},
+ {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]any{int64(0), int64(65535), int64(-42)}},
+ {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]any{niNULL, ni1, ni42}},
+ {"tinyuint", "TINYINT UNSIGNED NOT NULL", "UNSIGNED TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]any{uint8(0), uint8(255), uint8(42)}},
+ {"smalluint", "SMALLINT UNSIGNED NOT NULL", "UNSIGNED SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]any{uint16(0), uint16(65535), uint16(42)}},
+ {"biguint", "BIGINT UNSIGNED NOT NULL", "UNSIGNED BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]any{uint64(0), uint64(65535), uint64(42)}},
+ {"mediumuint", "MEDIUMINT UNSIGNED NOT NULL", "UNSIGNED MEDIUMINT", scanTypeUint32, false, 0, 0, [3]string{"0", "16777215", "42"}, [3]any{uint32(0), uint32(16777215), uint32(42)}},
+ {"uint13", "INT(13) UNSIGNED NOT NULL", "UNSIGNED INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]any{uint32(0), uint32(1337), uint32(42)}},
+ {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]any{float32(0), float32(42), float32(13.37)}},
+ {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]any{nf0, nfNULL, nf1337}},
+ {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]any{nf0, nfNULL, nf1337}},
+ {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]any{float64(0), float64(42), float64(13.37)}},
+ {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]any{nf0, nfNULL, nf1337}},
+ {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeString, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]any{"0.000000", "13.370000", "1234.123456"}},
+ {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeNullString, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]any{ns("0.000000"), nsNULL, ns("1234.123456")}},
+ {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeString, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]any{"0.0000", "13.3700", "1234.1235"}},
+ {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeNullString, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]any{ns("0.0000"), nsNULL, ns("1234.1235")}},
+ {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeString, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]any{"0", "13", "-12345"}},
+ {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeNullString, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]any{ns0, nsNULL, ns("-12345")}},
+ {"char25null", "CHAR(25)", "CHAR", scanTypeNullString, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{ns0, nsNULL, nsTest}},
+ {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeString, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{"0", "Test", "42"}},
+ {"binary4null", "BINARY(4)", "BINARY", scanTypeBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{b0pad4, bNULL, bTest}},
+ {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{b0, bTest, b42}},
+ {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{b0, bNULL, bTest}},
+ {"tinytextnull", "TINYTEXT", "TEXT", scanTypeNullString, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{ns0, nsNULL, nsTest}},
+ {"blobnull", "BLOB", "BLOB", scanTypeBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{b0, bNULL, bTest}},
+ {"textnull", "TEXT", "TEXT", scanTypeNullString, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]any{ns0, nsNULL, nsTest}},
+ {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{b0, bTest, b42}},
+ {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeString, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{"0", "Test", "42"}},
+ {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{b0, bTest, b42}},
+ {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeString, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]any{"0", "Test", "42"}},
+ {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]any{nt0, nt0, nt0}},
+ {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]any{nt0, nt1, nt2}},
+ {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]any{nt0, nt1, nt6}},
+ {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]any{nd1, ndNULL, nd2}},
+ {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]any{uint16(2006), uint16(2000), uint16(1994)}},
+ {"enum", "ENUM('', 'v1', 'v2')", "ENUM", scanTypeNullString, true, 0, 0, [3]string{"''", "'v1'", "'v2'"}, [3]any{ns(""), ns("v1"), ns("v2")}},
+ {"set", "set('', 'v1', 'v2')", "SET", scanTypeNullString, true, 0, 0, [3]string{"''", "'v1'", "'v1,v2'"}, [3]any{ns(""), ns("v1"), ns("v1,v2")}},
}
schema := ""
@@ -2945,8 +3212,11 @@ func TestRowsColumnTypes(t *testing.T) {
continue
}
}
-
- values := make([]interface{}, len(tt))
+ // Avoid panic caused by nil scantype.
+ if t.Failed() {
+ return
+ }
+ values := make([]any, len(tt))
for i := range values {
values[i] = reflect.New(types[i]).Interface()
}
@@ -2956,14 +3226,10 @@ func TestRowsColumnTypes(t *testing.T) {
if err != nil {
t.Fatalf("failed to scan values in %v", err)
}
- for j := range values {
- value := reflect.ValueOf(values[j]).Elem().Interface()
+ for j, value := range values {
+ value := reflect.ValueOf(value).Elem().Interface()
if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
- if columns[j].scanType == scanTypeRawBytes {
- t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
- } else {
- t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
- }
+ t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
}
}
i++
@@ -2979,9 +3245,9 @@ func TestRowsColumnTypes(t *testing.T) {
}
func TestValuerWithValueReceiverGivenNilValue(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255))")
- dbt.db.Exec("INSERT INTO test VALUES (?)", (*testValuer)(nil))
+ runTestsParallel(t, dsn, func(dbt *DBTest, tbl string) {
+ dbt.mustExec("CREATE TABLE " + tbl + " (value VARCHAR(255))")
+ dbt.db.Exec("INSERT INTO "+tbl+" VALUES (?)", (*testValuer)(nil))
// This test will panic on the INSERT if ConvertValue() does not check for typed nil before calling Value()
})
}
@@ -3015,27 +3281,28 @@ func TestRawBytesAreNotModified(t *testing.T) {
rows, err := dbt.db.QueryContext(ctx, `SELECT id, value FROM test`)
if err != nil {
- t.Fatal(err)
+ dbt.Fatal(err)
}
+ defer rows.Close()
var b int
var raw sql.RawBytes
- for rows.Next() {
- if err := rows.Scan(&b, &raw); err != nil {
- t.Fatal(err)
- }
+ if !rows.Next() {
+ dbt.Fatal("expected at least one row")
+ }
+ if err := rows.Scan(&b, &raw); err != nil {
+ dbt.Fatal(err)
+ }
- before := string(raw)
- // Ensure cancelling the query does not corrupt the contents of `raw`
- cancel()
- time.Sleep(time.Microsecond * 100)
- after := string(raw)
+ before := string(raw)
+ // Ensure cancelling the query does not corrupt the contents of `raw`
+ cancel()
+ time.Sleep(time.Microsecond * 100)
+ after := string(raw)
- if before != after {
- t.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
- }
+ if before != after {
+ dbt.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
}
- rows.Close()
}()
}
})
@@ -3058,7 +3325,7 @@ func TestConnectorObeysDialTimeouts(t *testing.T) {
return d.DialContext(ctx, prot, addr)
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
+ db, err := sql.Open(driverNameTest, fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -3209,3 +3476,137 @@ func TestConnectorTimeoutsWatchCancel(t *testing.T) {
t.Errorf("connection not closed")
}
}
+
+func TestConnectionAttributes(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ defaultAttrs := []string{
+ connAttrClientName,
+ connAttrOS,
+ connAttrPlatform,
+ connAttrPid,
+ connAttrServerHost,
+ }
+ host, _, _ := net.SplitHostPort(addr)
+ defaultAttrValues := []string{
+ connAttrClientNameValue,
+ connAttrOSValue,
+ connAttrPlatformValue,
+ strconv.Itoa(os.Getpid()),
+ host,
+ }
+
+ customAttrs := []string{"attr1", "fo/o"}
+ customAttrValues := []string{"value1", "bo/o"}
+
+ customAttrStrs := make([]string, len(customAttrs))
+ for i := range customAttrs {
+ customAttrStrs[i] = fmt.Sprintf("%s:%s", customAttrs[i], customAttrValues[i])
+ }
+ dsn += "&connectionAttributes=" + url.QueryEscape(strings.Join(customAttrStrs, ","))
+
+ var db *sql.DB
+ if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
+ db, err = sql.Open(driverNameTest, dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+ }
+
+ dbt := &DBTest{t, db}
+
+ queryString := "SELECT ATTR_NAME, ATTR_VALUE FROM performance_schema.session_account_connect_attrs WHERE PROCESSLIST_ID = CONNECTION_ID()"
+ rows := dbt.mustQuery(queryString)
+ defer rows.Close()
+
+ rowsMap := make(map[string]string)
+ for rows.Next() {
+ var attrName, attrValue string
+ rows.Scan(&attrName, &attrValue)
+ rowsMap[attrName] = attrValue
+ }
+
+ connAttrs := append(append([]string{}, defaultAttrs...), customAttrs...)
+ expectedAttrValues := append(append([]string{}, defaultAttrValues...), customAttrValues...)
+ for i := range connAttrs {
+ if gotValue := rowsMap[connAttrs[i]]; gotValue != expectedAttrValues[i] {
+ dbt.Errorf("expected %q, got %q", expectedAttrValues[i], gotValue)
+ }
+ }
+}
+
+func TestErrorInMultiResult(t *testing.T) {
+ // https://github.com/go-sql-driver/mysql/issues/1361
+ var db *sql.DB
+ if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
+ db, err = sql.Open("mysql", dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+ }
+
+ dbt := &DBTest{t, db}
+ query := `
+CREATE PROCEDURE test_proc1()
+BEGIN
+ SELECT 1,2;
+ SELECT 3,4;
+ SIGNAL SQLSTATE '10000' SET MESSAGE_TEXT = "some error", MYSQL_ERRNO = 10000;
+END;
+`
+ runCallCommand(dbt, query, "test_proc1")
+}
+
+func runCallCommand(dbt *DBTest, query, name string) {
+ dbt.mustExec(fmt.Sprintf("DROP PROCEDURE IF EXISTS %s", name))
+ dbt.mustExec(query)
+ defer dbt.mustExec("DROP PROCEDURE " + name)
+ rows, err := dbt.db.Query(fmt.Sprintf("CALL %s", name))
+ if err != nil {
+ return
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ }
+ for rows.NextResultSet() {
+ for rows.Next() {
+ }
+ }
+}
+
+func TestIssue1567(t *testing.T) {
+ // enable TLS.
+ runTests(t, dsn+"&tls=skip-verify", func(dbt *DBTest) {
+ // disable connection pooling.
+ // data race happens when new connection is created.
+ dbt.db.SetMaxIdleConns(0)
+
+ // estimate round trip time.
+ start := time.Now()
+ if err := dbt.db.PingContext(context.Background()); err != nil {
+ t.Fatal(err)
+ }
+ rtt := time.Since(start)
+ if rtt <= 0 {
+ // In some environments, rtt may become 0, so set it to at least 1ms.
+ rtt = time.Millisecond
+ }
+
+ count := 1000
+ if testing.Short() {
+ count = 10
+ }
+
+ for i := 0; i < count; i++ {
+ timeout := time.Duration(mrand.Int63n(int64(rtt)))
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ dbt.db.PingContext(ctx)
+ cancel()
+ }
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn.go
index 4b71aaa..65f5a02 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn.go
@@ -10,6 +10,7 @@ package mysql
import (
"bytes"
+ "context"
"crypto/rsa"
"crypto/tls"
"errors"
@@ -34,22 +35,27 @@ var (
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network type
- Addr string // Network address (requires Net)
- DBName string // Database name
- Params map[string]string // Connection parameters
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- pubKey *rsa.PublicKey // Server public key
- TLSConfig string // TLS configuration name
- TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
+ // non boolean fields
+
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network (e.g. "tcp", "tcp6", "unix". default: "tcp")
+ Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix")
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+ Logger Logger // Logger
+
+ // boolean fields
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
@@ -63,17 +69,57 @@ type Config struct {
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
+
+ // unexported fields. new options should be come here
+
+ beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
+ pubKey *rsa.PublicKey // Server public key
+ timeTruncate time.Duration // Truncate time.Time values to the specified duration
}
+// Functional Options Pattern
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(*Config) error
+
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
- return &Config{
- Collation: defaultCollation,
+ cfg := &Config{
Loc: time.UTC,
MaxAllowedPacket: defaultMaxAllowedPacket,
+ Logger: defaultLogger,
AllowNativePasswords: true,
CheckConnLiveness: true,
}
+
+ return cfg
+}
+
+// Apply applies the given options to the Config object.
+func (c *Config) Apply(opts ...Option) error {
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TimeTruncate sets the time duration to truncate time.Time values in
+// query parameters.
+func TimeTruncate(d time.Duration) Option {
+ return func(cfg *Config) error {
+ cfg.timeTruncate = d
+ return nil
+ }
+}
+
+// BeforeConnect sets the function to be invoked before a connection is established.
+func BeforeConnect(fn func(context.Context, *Config) error) Option {
+ return func(cfg *Config) error {
+ cfg.beforeConnect = fn
+ return nil
+ }
}
func (cfg *Config) Clone() *Config {
@@ -97,7 +143,7 @@ func (cfg *Config) Clone() *Config {
}
func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] {
return errInvalidDSNUnsafeCollation
}
@@ -153,6 +199,10 @@ func (cfg *Config) normalize() error {
}
}
+ if cfg.Logger == nil {
+ cfg.Logger = defaultLogger
+ }
+
return nil
}
@@ -171,6 +221,8 @@ func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
// FormatDSN formats the given Config into a DSN string which can be passed to
// the driver.
+//
+// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config].
func (cfg *Config) FormatDSN() string {
var buf bytes.Buffer
@@ -196,7 +248,7 @@ func (cfg *Config) FormatDSN() string {
// /dbname
buf.WriteByte('/')
- buf.WriteString(cfg.DBName)
+ buf.WriteString(url.PathEscape(cfg.DBName))
// [?param1=value1&...&paramN=valueN]
hasParam := false
@@ -230,7 +282,7 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
- if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if col := cfg.Collation; col != "" {
writeDSNParam(&buf, &hasParam, "collation", col)
}
@@ -254,6 +306,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "parseTime", "true")
}
+ if cfg.timeTruncate > 0 {
+ writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String())
+ }
+
if cfg.ReadTimeout > 0 {
writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
}
@@ -358,7 +414,11 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
break
}
}
- cfg.DBName = dsn[i+1 : j]
+
+ dbname := dsn[i+1 : j]
+ if cfg.DBName, err = url.PathUnescape(dbname); err != nil {
+ return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
+ }
break
}
@@ -378,13 +438,13 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
// Values must be url.QueryEscape'ed
func parseDSNParams(cfg *Config, params string) (err error) {
for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
+ key, value, found := strings.Cut(v, "=")
+ if !found {
continue
}
// cfg params
- switch value := param[1]; param[0] {
+ switch key {
// Disable INFILE allowlist / enable all files
case "allowAllFiles":
var isBool bool
@@ -490,6 +550,13 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // time.Time truncation
+ case "timeTruncate":
+ cfg.timeTruncate, err = time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err)
+ }
+
// I/O read Timeout
case "readTimeout":
cfg.ReadTimeout, err = time.ParseDuration(value)
@@ -554,13 +621,22 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if err != nil {
return
}
+
+ // Connection attributes
+ case "connectionAttributes":
+ connectionAttributes, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid connectionAttributes value: %v", err)
+ }
+ cfg.ConnectionAttributes = connectionAttributes
+
default:
// lazy init
if cfg.Params == nil {
cfg.Params = make(map[string]string)
}
- if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ if cfg.Params[key], err = url.QueryUnescape(value); err != nil {
return
}
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go
new file mode 100644
index 0000000..04c56ad
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_fuzz_test.go
@@ -0,0 +1,47 @@
+//go:build go1.18
+// +build go1.18
+
+package mysql
+
+import (
+ "net"
+ "testing"
+)
+
+func FuzzFormatDSN(f *testing.F) {
+ for _, test := range testDSNs { // See dsn_test.go
+ f.Add(test.in)
+ }
+
+ f.Fuzz(func(t *testing.T, dsn1 string) {
+ // Do not waste resources
+ if len(dsn1) > 1000 {
+ t.Skip("ignore: too long")
+ }
+
+ cfg1, err := ParseDSN(dsn1)
+ if err != nil {
+ t.Skipf("invalid DSN: %v", err)
+ }
+
+ dsn2 := cfg1.FormatDSN()
+ if dsn2 == dsn1 {
+ return
+ }
+
+ // Skip known cases of bad config that are not strictly checked by ParseDSN
+ if _, _, err := net.SplitHostPort(cfg1.Addr); err != nil {
+ t.Skipf("invalid addr %q: %v", cfg1.Addr, err)
+ }
+
+ cfg2, err := ParseDSN(dsn2)
+ if err != nil {
+ t.Fatalf("%q rewritten as %q: %v", dsn1, dsn2, err)
+ }
+
+ dsn3 := cfg2.FormatDSN()
+ if dsn3 != dsn2 {
+ t.Errorf("%q rewritten as %q", dsn2, dsn3)
+ }
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_test.go
index 41a6a29..dd8cd93 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/dsn_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/dsn_test.go
@@ -22,71 +22,80 @@ var testDSNs = []struct {
out *Config
}{{
"username:password@protocol(address)/dbname?param=value",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
}, {
"user@unix(/path/to/socket)/dbname?charset=utf8",
- &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
}, {
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, Logger: defaultLogger, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
}, {
"user:password@/dbname?allowNativePasswords=false&checkConnLiveness=false&maxAllowedPacket=0&allowFallbackToPlaintext=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowFallbackToPlaintext: true, AllowNativePasswords: false, CheckConnLiveness: false},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: 0, Logger: defaultLogger, AllowFallbackToPlaintext: true, AllowNativePasswords: false, CheckConnLiveness: false},
}, {
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
- &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
+}, {
+ "/dbname%2Fwithslash",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname/withslash", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"@/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"user:p@/ssword@/",
- &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"unix/?arg=%2Fsome%2Fpath.ext",
- &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"tcp(127.0.0.1)/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
}, {
"tcp(de:ad:be:ef::ca:fe)/dbname",
- &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
+ &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true},
+}, {
+ "user:password@/dbname?loc=UTC&timeout=30s&parseTime=true&timeTruncate=1h",
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Loc: time.UTC, Timeout: 30 * time.Second, ParseTime: true, MaxAllowedPacket: defaultMaxAllowedPacket, Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, timeTruncate: time.Hour},
},
}
func TestDSNParser(t *testing.T) {
for i, tst := range testDSNs {
- cfg, err := ParseDSN(tst.in)
- if err != nil {
- t.Error(err.Error())
- }
+ t.Run(tst.in, func(t *testing.T) {
+ cfg, err := ParseDSN(tst.in)
+ if err != nil {
+ t.Error(err.Error())
+ return
+ }
- // pointer not static
- cfg.TLS = nil
+ // pointer not static
+ cfg.TLS = nil
- if !reflect.DeepEqual(cfg, tst.out) {
- t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
- }
+ if !reflect.DeepEqual(cfg, tst.out) {
+ t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
+ }
+ })
}
}
@@ -113,27 +122,39 @@ func TestDSNParserInvalid(t *testing.T) {
func TestDSNReformat(t *testing.T) {
for i, tst := range testDSNs {
- dsn1 := tst.in
- cfg1, err := ParseDSN(dsn1)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg1.TLS = nil // pointer not static
- res1 := fmt.Sprintf("%+v", cfg1)
-
- dsn2 := cfg1.FormatDSN()
- cfg2, err := ParseDSN(dsn2)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg2.TLS = nil // pointer not static
- res2 := fmt.Sprintf("%+v", cfg2)
+ t.Run(tst.in, func(t *testing.T) {
+ dsn1 := tst.in
+ cfg1, err := ParseDSN(dsn1)
+ if err != nil {
+ t.Error(err.Error())
+ return
+ }
+ cfg1.TLS = nil // pointer not static
+ res1 := fmt.Sprintf("%+v", cfg1)
- if res1 != res2 {
- t.Errorf("%d. %q does not match %q", i, res2, res1)
- }
+ dsn2 := cfg1.FormatDSN()
+ if dsn2 != dsn1 {
+ // Just log
+ t.Logf("%d. %q reformatted as %q", i, dsn1, dsn2)
+ }
+
+ cfg2, err := ParseDSN(dsn2)
+ if err != nil {
+ t.Error(err.Error())
+ return
+ }
+ cfg2.TLS = nil // pointer not static
+ res2 := fmt.Sprintf("%+v", cfg2)
+
+ if res1 != res2 {
+ t.Errorf("%d. %q does not match %q", i, res2, res1)
+ }
+
+ dsn3 := cfg2.FormatDSN()
+ if dsn3 != dsn2 {
+ t.Errorf("%d. %q does not match %q", i, dsn2, dsn3)
+ }
+ })
}
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors.go
index ff9a8f0..a7ef889 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors.go
@@ -21,7 +21,7 @@ var (
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
@@ -37,20 +37,26 @@ var (
errBadConnNoWrite = errors.New("bad connection")
)
-var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
// Logger is used to log critical error messages.
type Logger interface {
- Print(v ...interface{})
+ Print(v ...any)
}
-// SetLogger is used to set the logger for critical errors.
+// NopLogger is a nop implementation of the Logger interface.
+type NopLogger struct{}
+
+// Print implements Logger interface.
+func (nl *NopLogger) Print(_ ...any) {}
+
+// SetLogger is used to set the default logger for critical errors.
// The initial logger is os.Stderr.
func SetLogger(logger Logger) error {
if logger == nil {
return errors.New("logger is nil")
}
- errLog = logger
+ defaultLogger = logger
return nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors_test.go
index 43213f9..53d6344 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/errors_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/errors_test.go
@@ -16,9 +16,9 @@ import (
)
func TestErrorsSetLogger(t *testing.T) {
- previous := errLog
+ previous := defaultLogger
defer func() {
- errLog = previous
+ defaultLogger = previous
}()
// set up logger
@@ -28,7 +28,7 @@ func TestErrorsSetLogger(t *testing.T) {
// print
SetLogger(logger)
- errLog.Print("test")
+ defaultLogger.Print("test")
// check result
if actual := buffer.String(); actual != expected {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fields.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/fields.go
index e0654a8..2860842 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/fields.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/fields.go
@@ -18,7 +18,7 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeBit:
return "BIT"
case fieldTypeBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TEXT"
}
return "BLOB"
@@ -37,6 +37,9 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeGeometry:
return "GEOMETRY"
case fieldTypeInt24:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED MEDIUMINT"
+ }
return "MEDIUMINT"
case fieldTypeJSON:
return "JSON"
@@ -46,7 +49,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "INT"
case fieldTypeLongBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "LONGTEXT"
}
return "LONGBLOB"
@@ -56,7 +59,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "BIGINT"
case fieldTypeMediumBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "MEDIUMTEXT"
}
return "MEDIUMBLOB"
@@ -74,7 +77,12 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "SMALLINT"
case fieldTypeString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.flags&flagEnum != 0 {
+ return "ENUM"
+ } else if mf.flags&flagSet != 0 {
+ return "SET"
+ }
+ if mf.charSet == binaryCollationID {
return "BINARY"
}
return "CHAR"
@@ -88,17 +96,17 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "TINYINT"
case fieldTypeTinyBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TINYTEXT"
}
return "TINYBLOB"
case fieldTypeVarChar:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeVarString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
@@ -110,21 +118,23 @@ func (mf *mysqlField) typeDatabaseName() string {
}
var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
- scanTypeUnknown = reflect.TypeOf(new(interface{}))
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeString = reflect.TypeOf("")
+ scanTypeNullString = reflect.TypeOf(sql.NullString{})
+ scanTypeBytes = reflect.TypeOf([]byte{})
+ scanTypeUnknown = reflect.TypeOf(new(any))
)
type mysqlField struct {
@@ -187,12 +197,18 @@ func (mf *mysqlField) scanType() reflect.Type {
}
return scanTypeNullFloat
+ case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
+ fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry:
+ if mf.charSet == binaryCollationID {
+ return scanTypeBytes
+ }
+ fallthrough
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
- fieldTypeTime:
- return scanTypeRawBytes
+ fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeString
+ }
+ return scanTypeNullString
case fieldTypeDate, fieldTypeNewDate,
fieldTypeTimestamp, fieldTypeDateTime:
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod
new file mode 100644
index 0000000..4629714
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-sql-driver/mysql
+
+go 1.18
+
+require filippo.io/edwards25519 v1.1.0
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum
new file mode 100644
index 0000000..359ca94
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/go.sum
@@ -0,0 +1,2 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/infile.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/infile.go
index 3279dcf..0c8af9f 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/infile.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/infile.go
@@ -93,7 +93,7 @@ func deferredClose(err *error, closer io.Closer) {
const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+func (mc *okHandler) handleInFileRequest(name string) (err error) {
var rdr io.Reader
var data []byte
packetSize := defaultPacketSize
@@ -116,10 +116,10 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
defer deferredClose(&err, cl)
}
} else {
- err = fmt.Errorf("Reader '%s' is <nil>", name)
+ err = fmt.Errorf("reader '%s' is <nil>", name)
}
} else {
- err = fmt.Errorf("Reader '%s' is not registered", name)
+ err = fmt.Errorf("reader '%s' is not registered", name)
}
} else { // File
name = strings.Trim(name, `"`)
@@ -154,7 +154,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
for err == nil {
n, err = rdr.Read(data[4:])
if n > 0 {
- if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil {
return ioErr
}
}
@@ -168,7 +168,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
if data == nil {
data = make([]byte, 4)
}
- if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
return ioErr
}
@@ -177,6 +177,6 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
return mc.readResultOK()
}
- mc.readPacket()
+ mc.conn().readPacket()
return err
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime.go
index 36c8a42..316a48a 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime.go
@@ -38,7 +38,7 @@ type NullTime sql.NullTime
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
-func (nt *NullTime) Scan(value interface{}) (err error) {
+func (nt *NullTime) Scan(value any) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
@@ -59,7 +59,7 @@ func (nt *NullTime) Scan(value interface{}) (err error) {
}
nt.Valid = false
- return fmt.Errorf("Can't convert %T to time.Time", value)
+ return fmt.Errorf("can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime_test.go
index a14ec06..4f1d902 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/nulltime_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/nulltime_test.go
@@ -23,7 +23,7 @@ var (
func TestScanNullTime(t *testing.T) {
var scanTests = []struct {
- in interface{}
+ in any
error bool
valid bool
time time.Time
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets.go
index ee05c95..90a3472 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets.go
@@ -14,10 +14,10 @@ import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
- "errors"
"fmt"
"io"
"math"
+ "strconv"
"time"
)
@@ -34,7 +34,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
+ mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
@@ -44,6 +44,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// check packet sync [8 bit]
if data[3] != mc.sequence {
+ mc.Close()
if data[3] > mc.sequence {
return nil, ErrPktSyncMul
}
@@ -56,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if pktLen == 0 {
// there was no previous packet
if prevData == nil {
- errLog.Print(ErrMalformPkt)
+ mc.log(ErrMalformPkt)
mc.Close()
return nil, ErrInvalidConn
}
@@ -70,7 +71,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
+ mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
@@ -97,34 +98,6 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return ErrPktTooLarge
}
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.reset {
- mc.reset = false
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- if mc.cfg.CheckConnLiveness {
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
- }
- if err == nil {
- err = connCheck(conn)
- }
- }
- if err != nil {
- errLog.Print("closing bad idle connection: ", err)
- mc.Close()
- return driver.ErrBadConn
- }
- }
-
for {
var size int
if pktLen >= maxPacketSize {
@@ -161,7 +134,7 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
mc.cleanup()
- errLog.Print(ErrMalformPkt)
+ mc.log(ErrMalformPkt)
} else {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
@@ -171,7 +144,7 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return errBadConnNoWrite
}
mc.cleanup()
- errLog.Print(err)
+ mc.log(err)
}
return ErrInvalidConn
}
@@ -239,7 +212,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
// reserved (all [00]) [10 bytes]
pos += 1 + 2 + 2 + 1 + 10
- // second part of the password cipher [mininum 13 bytes],
+ // second part of the password cipher [minimum 13 bytes],
// where len=MAX(13, length of auth-plugin-data - 8)
//
// The web documentation is ambiguous about the length. However,
@@ -285,6 +258,7 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
clientLocalFiles |
clientPluginAuth |
clientMultiResults |
+ clientConnectAttrs |
mc.flags&clientLongFlag
if mc.cfg.ClientFoundRows {
@@ -318,11 +292,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
pktLen += n + 1
}
+ // encode length of the connection attributes
+ var connAttrsLEIBuf [9]byte
+ connAttrsLen := len(mc.connector.encodedAttributes)
+ connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
+ pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
+
// Calculate packet length and get buffer with that size
- data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -338,14 +318,18 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[10] = 0x00
data[11] = 0x00
- // Charset [1 byte]
+ // Collation ID [1 byte]
+ cname := mc.cfg.Collation
+ if cname == "" {
+ cname = defaultCollation
+ }
var found bool
- data[12], found = collations[mc.cfg.Collation]
+ data[12], found = collations[cname]
if !found {
// Note possibility for false negatives:
// could be triggered although the collation is valid if the
// collations map does not contain entries the server supports.
- return errors.New("unknown collation")
+ return fmt.Errorf("unknown collation: %q", cname)
}
// Filler [23 bytes] (all 0x00)
@@ -367,7 +351,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
if err := tlsConn.Handshake(); err != nil {
return err
}
- mc.rawConn = mc.netConn
mc.netConn = tlsConn
mc.buf.nc = tlsConn
}
@@ -394,6 +377,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[pos] = 0x00
pos++
+ // Connection Attributes
+ pos += copy(data[pos:], connAttrsLEI)
+ pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
+
// Send Auth packet
return mc.writePacket(data[:pos])
}
@@ -404,7 +391,7 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
data, err := mc.buf.takeSmallBuffer(pktLen)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -424,7 +411,7 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -443,7 +430,7 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -464,7 +451,7 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -495,7 +482,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
switch data[0] {
case iOK:
- return nil, "", mc.handleOkPacket(data)
+ // resultUnchanged, since auth happens before any queries or
+ // commands have been executed.
+ return nil, "", mc.resultUnchanged().handleOkPacket(data)
case iAuthMoreData:
return data[1:], "", err
@@ -518,9 +507,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
}
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
- data, err := mc.readPacket()
+// Returns error if Packet is not a 'Result OK'-Packet
+func (mc *okHandler) readResultOK() error {
+ data, err := mc.conn().readPacket()
if err != nil {
return err
}
@@ -528,13 +517,17 @@ func (mc *mysqlConn) readResultOK() error {
if data[0] == iOK {
return mc.handleOkPacket(data)
}
- return mc.handleErrorPacket(data)
+ return mc.conn().handleErrorPacket(data)
}
// Result Set Header Packet
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
- data, err := mc.readPacket()
+func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
+ // handleOkPacket replaces both values; other cases leave the values unchanged.
+ mc.result.affectedRows = append(mc.result.affectedRows, 0)
+ mc.result.insertIds = append(mc.result.insertIds, 0)
+
+ data, err := mc.conn().readPacket()
if err == nil {
switch data[0] {
@@ -542,19 +535,16 @@ func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
return 0, mc.handleOkPacket(data)
case iERR:
- return 0, mc.handleErrorPacket(data)
+ return 0, mc.conn().handleErrorPacket(data)
case iLocalInFile:
return 0, mc.handleInFileRequest(string(data[1:]))
}
// column count
- num, _, n := readLengthEncodedInteger(data)
- if n-len(data) == 0 {
- return int(num), nil
- }
-
- return 0, ErrMalformPkt
+ num, _, _ := readLengthEncodedInteger(data)
+ // ignore remaining data in the packet. see #1478.
+ return int(num), nil
}
return 0, err
}
@@ -607,18 +597,61 @@ func readStatus(b []byte) statusFlag {
return statusFlag(b[0]) | statusFlag(b[1])<<8
}
+// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't
+// need to be cleared first (e.g. during authentication, or while additional
+// resultsets are being fetched.)
+func (mc *mysqlConn) resultUnchanged() *okHandler {
+ return (*okHandler)(mc)
+}
+
+// okHandler represents the state of the connection when mysqlConn.result has
+// been prepared for processing of OK packets.
+//
+// To correctly populate mysqlConn.result (updated by handleOkPacket()), all
+// callpaths must either:
+//
+// 1. first clear it using clearResult(), or
+// 2. confirm that they don't need to (by calling resultUnchanged()).
+//
+// Both return an instance of type *okHandler.
+type okHandler mysqlConn
+
+// Exposes the underlying type's methods.
+func (mc *okHandler) conn() *mysqlConn {
+ return (*mysqlConn)(mc)
+}
+
+// clearResult clears the connection's stored affectedRows and insertIds
+// fields.
+//
+// It returns a handler that can process OK responses.
+func (mc *mysqlConn) clearResult() *okHandler {
+ mc.result = mysqlResult{}
+ return (*okHandler)(mc)
+}
+
// Ok Packet
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *mysqlConn) handleOkPacket(data []byte) error {
+func (mc *okHandler) handleOkPacket(data []byte) error {
var n, m int
+ var affectedRows, insertId uint64
// 0x00 [1 byte]
// Affected rows [Length Coded Binary]
- mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+ affectedRows, _, n = readLengthEncodedInteger(data[1:])
// Insert id [Length Coded Binary]
- mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+ insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // Update for the current statement result (only used by
+ // readResultSetHeaderPacket).
+ if len(mc.result.affectedRows) > 0 {
+ mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows)
+ }
+ if len(mc.result.insertIds) > 0 {
+ mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId)
+ }
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
@@ -769,7 +802,8 @@ func (rows *textRows) readRow(dest []driver.Value) error {
for i := range dest {
// Read bytes and convert to string
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ var buf []byte
+ buf, isNull, n, err = readLengthEncodedString(data[pos:])
pos += n
if err != nil {
@@ -781,19 +815,40 @@ func (rows *textRows) readRow(dest []driver.Value) error {
continue
}
- if !mc.parseTime {
- continue
- }
-
- // Parse time field
switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp,
fieldTypeDateTime,
fieldTypeDate,
fieldTypeNewDate:
- if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
- return err
+ if mc.parseTime {
+ dest[i], err = parseDateTime(buf, mc.cfg.Loc)
+ } else {
+ dest[i] = buf
}
+
+ case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong:
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i], err = strconv.ParseUint(string(buf), 10, 64)
+ } else {
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+ }
+
+ case fieldTypeFloat:
+ var d float64
+ d, err = strconv.ParseFloat(string(buf), 32)
+ dest[i] = float32(d)
+
+ case fieldTypeDouble:
+ dest[i], err = strconv.ParseFloat(string(buf), 64)
+
+ default:
+ dest[i] = buf
+ }
+ if err != nil {
+ return err
}
}
@@ -938,7 +993,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -1116,7 +1171,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
b = append(b, "0000-00-00"...)
} else {
- b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return err
}
@@ -1137,7 +1192,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
if err = mc.buf.store(data); err != nil {
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
}
@@ -1149,7 +1204,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
return mc.writePacket(data)
}
-func (mc *mysqlConn) discardResults() error {
+// For each remaining resultset in the stream, discards its rows and updates
+// mc.affectedRows and mc.insertIds.
+func (mc *okHandler) discardResults() error {
for mc.status&statusMoreResultsExists != 0 {
resLen, err := mc.readResultSetHeaderPacket()
if err != nil {
@@ -1157,11 +1214,11 @@ func (mc *mysqlConn) discardResults() error {
}
if resLen > 0 {
// columns
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
// rows
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets_test.go
index b61e4db..fa4683e 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/packets_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/packets_test.go
@@ -96,9 +96,11 @@ var _ net.Conn = new(mockConn)
func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
conn := new(mockConn)
+ connector := newConnector(NewConfig())
mc := &mysqlConn{
buf: newBuffer(conn),
- cfg: NewConfig(),
+ cfg: connector.cfg,
+ connector: connector,
netConn: conn,
closech: make(chan struct{}),
maxAllowedPacket: defaultMaxAllowedPacket,
@@ -128,30 +130,34 @@ func TestReadPacketSingleByte(t *testing.T) {
}
func TestReadPacketWrongSequenceID(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- // too low sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
- conn.maxReads = 1
- mc.sequence = 1
- _, err := mc.readPacket()
- if err != ErrPktSync {
- t.Errorf("expected ErrPktSync, got %v", err)
- }
-
- // reset
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // too high sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
- _, err = mc.readPacket()
- if err != ErrPktSyncMul {
- t.Errorf("expected ErrPktSyncMul, got %v", err)
+ for _, testCase := range []struct {
+ ClientSequenceID byte
+ ServerSequenceID byte
+ ExpectedErr error
+ }{
+ {
+ ClientSequenceID: 1,
+ ServerSequenceID: 0,
+ ExpectedErr: ErrPktSync,
+ },
+ {
+ ClientSequenceID: 0,
+ ServerSequenceID: 0x42,
+ ExpectedErr: ErrPktSyncMul,
+ },
+ } {
+ conn, mc := newRWMockConn(testCase.ClientSequenceID)
+
+ conn.data = []byte{0x01, 0x00, 0x00, testCase.ServerSequenceID, 0xff}
+ _, err := mc.readPacket()
+ if err != testCase.ExpectedErr {
+ t.Errorf("expected %v, got %v", testCase.ExpectedErr, err)
+ }
+
+ // connection should not be returned to the pool in this state
+ if mc.IsValid() {
+ t.Errorf("expected IsValid() to be false")
+ }
}
}
@@ -179,7 +185,7 @@ func TestReadPacketSplit(t *testing.T) {
data[4] = 0x11
data[maxPacketSize+3] = 0x22
- // 2nd packet has payload length 0 and squence id 1
+ // 2nd packet has payload length 0 and sequence id 1
// 00 00 00 01
data[pkt2ofs+3] = 0x01
@@ -211,7 +217,7 @@ func TestReadPacketSplit(t *testing.T) {
data[pkt2ofs+4] = 0x33
data[pkt2ofs+maxPacketSize+3] = 0x44
- // 3rd packet has payload length 0 and squence id 2
+ // 3rd packet has payload length 0 and sequence id 2
// 00 00 00 02
data[pkt3ofs+3] = 0x02
@@ -265,6 +271,7 @@ func TestReadPacketFail(t *testing.T) {
mc := &mysqlConn{
buf: newBuffer(conn),
closech: make(chan struct{}),
+ cfg: NewConfig(),
}
// illegal empty (stand-alone) packet
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go
new file mode 100644
index 0000000..d516314
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/result.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import "database/sql/driver"
+
+// Result exposes data not available through *connection.Result.
+//
+// This is accessible by executing statements using sql.Conn.Raw() and
+// downcasting the returned result:
+//
+// res, err := rawConn.Exec(...)
+// res.(mysql.Result).AllRowsAffected()
+type Result interface {
+ driver.Result
+ // AllRowsAffected returns a slice containing the affected rows for each
+ // executed statement.
+ AllRowsAffected() []int64
+ // AllLastInsertIds returns a slice containing the last inserted ID for each
+ // executed statement.
+ AllLastInsertIds() []int64
+}
+
+type mysqlResult struct {
+ // One entry in both slices is created for every executed statement result.
+ affectedRows []int64
+ insertIds []int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertIds[len(res.insertIds)-1], nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows[len(res.affectedRows)-1], nil
+}
+
+func (res *mysqlResult) AllLastInsertIds() []int64 {
+ return append([]int64{}, res.insertIds...) // defensive copy
+}
+
+func (res *mysqlResult) AllRowsAffected() []int64 {
+ return append([]int64{}, res.affectedRows...) // defensive copy
+}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/rows.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/rows.go
index 888bdb5..81fa606 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/rows.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/rows.go
@@ -123,7 +123,8 @@ func (rows *mysqlRows) Close() (err error) {
err = mc.readUntilEOF()
}
if err == nil {
- if err = mc.discardResults(); err != nil {
+ handleOk := mc.clearResult()
+ if err = handleOk.discardResults(); err != nil {
return err
}
}
@@ -160,7 +161,15 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
return 0, io.EOF
}
rows.rs = resultSet{}
- return rows.mc.readResultSetHeaderPacket()
+ // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to
+ // nextResultSet.
+ resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket()
+ if err != nil {
+ // Clean up about multi-results flag
+ rows.rs.done = true
+ rows.mc.status = rows.mc.status & (^statusMoreResultsExists)
+ }
+ return resLen, err
}
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement.go
index 10ece8b..0436f22 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement.go
@@ -51,7 +51,7 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -61,12 +61,10 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
mc := stmt.mc
-
- mc.affectedRows = 0
- mc.insertId = 0
+ handleOk := stmt.mc.clearResult()
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -83,14 +81,12 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
}
- if err := mc.discardResults(); err != nil {
+ if err := handleOk.discardResults(); err != nil {
return nil, err
}
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+ copied := mc.result
+ return &copied, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
@@ -99,7 +95,7 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -111,7 +107,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
mc := stmt.mc
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ handleOk := stmt.mc.clearResult()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -144,7 +141,7 @@ type converter struct{}
// implementation does not. This function should be kept in sync with
// database/sql/driver defaultConverter.ConvertValue() except for that
// deliberate difference.
-func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+func (c converter) ConvertValue(v any) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement_test.go
index 2563ece..15f9d7c 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/statement_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/statement_test.go
@@ -77,7 +77,7 @@ func TestConvertPointer(t *testing.T) {
}
func TestConvertSignedIntegers(t *testing.T) {
- values := []interface{}{
+ values := []any{
int8(-42),
int16(-42),
int32(-42),
@@ -106,7 +106,7 @@ func (u myUint64) Value() (driver.Value, error) {
}
func TestConvertUnsignedIntegers(t *testing.T) {
- values := []interface{}{
+ values := []any{
uint8(42),
uint16(42),
uint32(42),
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/transaction.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/transaction.go
index 4a4b610..4a4b610 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/transaction.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/transaction.go
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils.go
index 15dbd8d..cda24fe 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils.go
@@ -36,7 +36,7 @@ var (
// registering it.
//
// rootCertPool := x509.NewCertPool()
-// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// pem, err := os.ReadFile("/path/ca-cert.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -265,7 +265,11 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
-func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) {
+ if timeTruncate > 0 {
+ t = t.Truncate(timeTruncate)
+ }
+
year, month, day := t.Date()
hour, min, sec := t.Clock()
nsec := t.Nanosecond()
@@ -616,6 +620,11 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
}
+func appendLengthEncodedString(b []byte, s string) []byte {
+ b = appendLengthEncodedInteger(b, uint64(len(s)))
+ return append(b, s...)
+}
+
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
// If cap(buf) is not enough, reallocate new buffer.
func reserveBuffer(buf []byte, appendSize int) []byte {
diff --git a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils_test.go b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils_test.go
index 4e5fc3c..80aebdd 100644
--- a/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.7.1/utils_test.go
+++ b/dependencies/pkg/mod/github.com/go-sql-driver/mysql@v1.8.1/utils_test.go
@@ -237,8 +237,10 @@ func TestIsolationLevelMapping(t *testing.T) {
func TestAppendDateTime(t *testing.T) {
tests := []struct {
- t time.Time
- str string
+ t time.Time
+ str string
+ timeTruncate time.Duration
+ expectedErr bool
}{
{
t: time.Date(1234, 5, 6, 0, 0, 0, 0, time.UTC),
@@ -276,34 +278,75 @@ func TestAppendDateTime(t *testing.T) {
t: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),
str: "0001-01-01",
},
+ // Truncated time
+ {
+ t: time.Date(1234, 5, 6, 0, 0, 0, 0, time.UTC),
+ str: "1234-05-06",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(4567, 12, 31, 12, 0, 0, 0, time.UTC),
+ str: "4567-12-31 12:00:00",
+ timeTruncate: time.Minute,
+ },
+ {
+ t: time.Date(2020, 5, 30, 12, 34, 0, 0, time.UTC),
+ str: "2020-05-30 12:34:00",
+ timeTruncate: 0,
+ },
+ {
+ t: time.Date(2020, 5, 30, 12, 34, 56, 0, time.UTC),
+ str: "2020-05-30 12:34:56",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(2020, 5, 30, 22, 33, 44, 123000000, time.UTC),
+ str: "2020-05-30 22:33:44",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(2020, 5, 30, 22, 33, 44, 123456000, time.UTC),
+ str: "2020-05-30 22:33:44.123",
+ timeTruncate: time.Millisecond,
+ },
+ {
+ t: time.Date(2020, 5, 30, 22, 33, 44, 123456789, time.UTC),
+ str: "2020-05-30 22:33:44",
+ timeTruncate: time.Second,
+ },
+ {
+ t: time.Date(9999, 12, 31, 23, 59, 59, 999999999, time.UTC),
+ str: "9999-12-31 23:59:59.999999999",
+ timeTruncate: 0,
+ },
+ {
+ t: time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC),
+ str: "0001-01-01",
+ timeTruncate: 365 * 24 * time.Hour,
+ },
+ // year out of range
+ {
+ t: time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC),
+ expectedErr: true,
+ },
+ {
+ t: time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC),
+ expectedErr: true,
+ },
}
for _, v := range tests {
buf := make([]byte, 0, 32)
- buf, _ = appendDateTime(buf, v.t)
+ buf, err := appendDateTime(buf, v.t, v.timeTruncate)
+ if err != nil {
+ if !v.expectedErr {
+ t.Errorf("appendDateTime(%v) returned an errror: %v", v.t, err)
+ }
+ continue
+ }
if str := string(buf); str != v.str {
t.Errorf("appendDateTime(%v), have: %s, want: %s", v.t, str, v.str)
}
}
-
- // year out of range
- {
- v := time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC)
- buf := make([]byte, 0, 32)
- _, err := appendDateTime(buf, v)
- if err == nil {
- t.Error("want an error")
- return
- }
- }
- {
- v := time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC)
- buf := make([]byte, 0, 32)
- _, err := appendDateTime(buf, v)
- if err == nil {
- t.Error("want an error")
- return
- }
- }
}
func TestParseDateTime(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.codecov.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.codecov.yml
index 8364eea..8364eea 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.codecov.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.codecov.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/FUNDING.yml
index ab4b632..ab4b632 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/FUNDING.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/FUNDING.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..1aad475
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,29 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+
+Please provide a minimum yaml content that can be reproduced.
+We are more than happy to use [Go Playground](https://go.dev/play)
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Version Variables**
+ - Go version: [e.g. 1.21 ]
+ - go-yaml's Version: [e.g. v1.11.1 ]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..e301d68
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: feature request
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md
new file mode 100644
index 0000000..a1d6c8e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/pull_request_template.md
@@ -0,0 +1,4 @@
+Before submitting your PR, please confirm the following.
+
+- [ ] Describe the purpose for which you created this PR.
+- [ ] Create test code that corresponds to the modification \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/workflows/go.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/workflows/go.yml
index a935b3f..647c692 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/.github/workflows/go.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/.github/workflows/go.yml
@@ -10,15 +10,15 @@ jobs:
strategy:
matrix:
os: [ "ubuntu-latest", "macos-latest", "windows-latest" ]
- go-version: [ "1.18", "1.19", "1.20" ]
+ go-version: [ "1.19", "1.20", "1.21" ]
runs-on: ${{ matrix.os }}
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go ${{ matrix.go-version }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}
- - name: checkout
- uses: actions/checkout@v3
- name: test
run: |
make test
@@ -28,17 +28,17 @@ jobs:
strategy:
matrix:
os: [ "ubuntu-latest", "windows-latest" ]
- go-version: [ "1.18", "1.19", "1.20" ]
+ go-version: [ "1.19", "1.20", "1.21" ]
runs-on: ${{ matrix.os }}
env:
GOARCH: "386"
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go ${{ matrix.go-version }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}
- - name: checkout
- uses: actions/checkout@v3
- name: test
run: |
make simple-test
@@ -47,12 +47,12 @@ jobs:
name: ycat
runs-on: ubuntu-latest
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
- go-version: "1.20"
- - name: checkout
- uses: actions/checkout@v3
+ go-version: "1.21"
- name: build
run: |
make ycat/build
@@ -64,12 +64,12 @@ jobs:
name: Coverage
runs-on: ubuntu-latest
steps:
+ - name: checkout
+ uses: actions/checkout@v4
- name: setup Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
- go-version: "1.20"
- - name: checkout
- uses: actions/checkout@v3
+ go-version: "1.21"
- name: measure coverage
run: |
make cover
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/CHANGELOG.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/CHANGELOG.md
index 2887563..c8f820d 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/CHANGELOG.md
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/CHANGELOG.md
@@ -1,3 +1,26 @@
+# 1.11.2 - 2023-09-15
+
+### Fix bugs
+
+- Fix quoted comments ( #370 )
+- Fix handle of space at start or last ( #376 )
+- Fix sequence with comment ( #390 )
+
+# 1.11.1 - 2023-09-14
+
+### Fix bugs
+
+- Handle `\r` in a double-quoted string the same as `\n` ( #372 )
+- Replace loop with n.Values = append(n.Values, target.Values...) ( #380 )
+- Skip encoding an inline field if it is null ( #386 )
+- Fix comment parsing with null value ( #388 )
+
+# 1.11.0 - 2023-04-03
+
+### Features
+
+- Supports dynamically switch encode and decode processing for a given type
+
# 1.10.1 - 2023-03-28
### Features
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/LICENSE b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/LICENSE
index 04485ce..04485ce 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/LICENSE
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/LICENSE
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/Makefile b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/Makefile
index 1b1d923..1b1d923 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/Makefile
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/Makefile
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/README.md b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/README.md
index 9452349..9452349 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/README.md
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/README.md
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast.go
index f535a24..b4d5ec4 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast.go
@@ -1506,9 +1506,7 @@ func (n *SequenceNode) Replace(idx int, value Node) error {
func (n *SequenceNode) Merge(target *SequenceNode) {
column := n.Start.Position.Column - target.Start.Position.Column
target.AddColumn(column)
- for _, value := range target.Values {
- n.Values = append(n.Values, value)
- }
+ n.Values = append(n.Values, target.Values...)
}
// SetIsFlowStyle set value to IsFlowStyle field recursively.
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast_test.go
index e7022b6..e7022b6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/ast/ast_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/ast/ast_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/cmd/ycat/ycat.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/cmd/ycat/ycat.go
index c70cb3b..c70cb3b 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/cmd/ycat/ycat.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/cmd/ycat/ycat.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode.go
index d3dbabc..d3dbabc 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode_test.go
index 2549bea..cabfd33 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/decode_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/decode_test.go
@@ -68,6 +68,18 @@ func TestDecoder(t *testing.T) {
map[string]string{"v": "1.234"},
},
{
+ "v: \" foo\"\n",
+ map[string]string{"v": " foo"},
+ },
+ {
+ "v: \"foo \"\n",
+ map[string]string{"v": "foo "},
+ },
+ {
+ "v: \" foo \"\n",
+ map[string]string{"v": " foo "},
+ },
+ {
"v: false\n",
map[string]bool{"v": false},
},
@@ -425,6 +437,14 @@ func TestDecoder(t *testing.T) {
`"1": "a\x2Fb\u002Fc\U0000002Fd"`,
map[interface{}]interface{}{"1": `a/b/c/d`},
},
+ {
+ "'1': \"2\\n3\"",
+ map[interface{}]interface{}{"1": "2\n3"},
+ },
+ {
+ "'1': \"2\\r\\n3\"",
+ map[interface{}]interface{}{"1": "2\r\n3"},
+ },
{
"a: -b_c",
@@ -841,6 +861,14 @@ func TestDecoder(t *testing.T) {
},
},
{
+ "v:\n- A\n- |-\n B\n C\n\n\n",
+ map[string][]string{
+ "v": {
+ "A", "B\nC",
+ },
+ },
+ },
+ {
"v:\n- A\n- >-\n B\n C\n",
map[string][]string{
"v": {
@@ -849,6 +877,14 @@ func TestDecoder(t *testing.T) {
},
},
{
+ "v:\n- A\n- >-\n B\n C\n\n\n",
+ map[string][]string{
+ "v": {
+ "A", "B C",
+ },
+ },
+ },
+ {
"a: b\nc: d\n",
struct {
A string
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode.go
index 7d8d81e..3b9b298 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode.go
@@ -823,6 +823,10 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column
}
mapNode, ok := value.(ast.MapNode)
if !ok {
+ // if an inline field is null, skip encoding it
+ if _, ok := value.(*ast.NullNode); ok {
+ continue
+ }
return nil, xerrors.Errorf("inline value is must be map or struct type")
}
mapIter := mapNode.MapRange()
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode_test.go
index 74b1aa5..3ff6f1c 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/encode_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/encode_test.go
@@ -304,6 +304,21 @@ func TestEncoder(t *testing.T) {
nil,
},
{
+ "a: \" b\"\n",
+ map[string]string{"a": " b"},
+ nil,
+ },
+ {
+ "a: \"b \"\n",
+ map[string]string{"a": "b "},
+ nil,
+ },
+ {
+ "a: \" b \"\n",
+ map[string]string{"a": " b "},
+ nil,
+ },
+ {
"a: 100.5\n",
map[string]interface{}{
"a": 100.5,
@@ -980,6 +995,30 @@ c: true
}
}
+func TestEncoder_InlineNil(t *testing.T) {
+ type base struct {
+ A int
+ B string
+ }
+ var buf bytes.Buffer
+ enc := yaml.NewEncoder(&buf)
+ if err := enc.Encode(struct {
+ *base `yaml:",inline"`
+ C bool
+ }{
+ C: true,
+ }); err != nil {
+ t.Fatalf("%+v", err)
+ }
+ expect := `
+c: true
+`
+ actual := "\n" + buf.String()
+ if expect != actual {
+ t.Fatalf("inline marshal error: expect=[%s] actual=[%s]", expect, actual)
+ }
+}
+
func TestEncoder_Flow(t *testing.T) {
var buf bytes.Buffer
enc := yaml.NewEncoder(&buf, yaml.Flow(true))
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/error.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/error.go
index 163dcc5..163dcc5 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/error.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/error.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.mod b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.mod
index f6e74c3..4550ff3 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.mod
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.mod
@@ -1,6 +1,6 @@
module github.com/goccy/go-yaml
-go 1.18
+go 1.19
require (
github.com/fatih/color v1.10.0
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.sum b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.sum
index 7249df6..7249df6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/go.sum
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/go.sum
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/internal/errors/error.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/internal/errors/error.go
index 7f1ea9a..7f1ea9a 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/internal/errors/error.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/internal/errors/error.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer.go
index 3207f4f..3207f4f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer_test.go
index 0d8f648..0d8f648 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/lexer/lexer_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/lexer/lexer_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/option.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/option.go
index eab5d43..eab5d43 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/option.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/option.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/context.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/context.go
index 99f18b1..42cc4f8 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/context.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/context.go
@@ -13,7 +13,6 @@ type context struct {
idx int
size int
tokens token.Tokens
- mode Mode
path string
}
@@ -56,7 +55,6 @@ func (c *context) copy() *context {
idx: c.idx,
size: c.size,
tokens: append(token.Tokens{}, c.tokens...),
- mode: c.mode,
path: c.path,
}
}
@@ -145,10 +143,6 @@ func (c *context) afterNextNotCommentToken() *token.Token {
return nil
}
-func (c *context) enabledComment() bool {
- return c.mode&ParseComments != 0
-}
-
func (c *context) isCurrentCommentToken() bool {
tk := c.currentToken()
if tk == nil {
@@ -193,7 +187,6 @@ func newContext(tokens token.Tokens, mode Mode) *context {
idx: 0,
size: len(filteredTokens),
tokens: token.Tokens(filteredTokens),
- mode: mode,
path: "$",
}
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser.go
index 568e6ad..13ada50 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser.go
@@ -156,15 +156,38 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
ctx.insertToken(ctx.idx, nullToken)
return ast.Null(nullToken), nil
}
-
+ var comment *ast.CommentGroupNode
+ if tk.Type == token.CommentType {
+ comment = p.parseCommentOnly(ctx)
+ if comment != nil {
+ comment.SetPath(ctx.withChild(key.GetToken().Value).path)
+ }
+ tk = ctx.currentToken()
+ }
if tk.Position.Column == key.GetToken().Position.Column && tk.Type == token.StringType {
// in this case,
// ----
// key: <value does not defined>
// next
+
nullToken := p.createNullToken(colonToken)
ctx.insertToken(ctx.idx, nullToken)
- return ast.Null(nullToken), nil
+ nullNode := ast.Null(nullToken)
+
+ if comment != nil {
+ nullNode.SetComment(comment)
+ } else {
+ // If there is a comment, it is already bound to the key node,
+ // so remove the comment from the key to bind it to the null value.
+ keyComment := key.GetComment()
+ if keyComment != nil {
+ if err := key.SetComment(nil); err != nil {
+ return nil, err
+ }
+ nullNode.SetComment(keyComment)
+ }
+ }
+ return nullNode, nil
}
if tk.Position.Column < key.GetToken().Position.Column {
@@ -174,13 +197,20 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken
// next
nullToken := p.createNullToken(colonToken)
ctx.insertToken(ctx.idx, nullToken)
- return ast.Null(nullToken), nil
+ nullNode := ast.Null(nullToken)
+ if comment != nil {
+ nullNode.SetComment(comment)
+ }
+ return nullNode, nil
}
value, err := p.parseToken(ctx, ctx.currentToken())
if err != nil {
return nil, errors.Wrapf(err, "failed to parse mapping 'value' node")
}
+ if comment != nil {
+ value.SetComment(comment)
+ }
return value, nil
}
@@ -304,10 +334,9 @@ func (p *parser) parseSequenceEntry(ctx *context) (*ast.SequenceNode, error) {
if tk.Type == token.CommentType {
comment = p.parseCommentOnly(ctx)
tk = ctx.currentToken()
- if tk.Type != token.SequenceEntryType {
- break
+ if tk.Type == token.SequenceEntryType {
+ ctx.progress(1) // skip sequence token
}
- ctx.progress(1) // skip sequence token
}
value, err := p.parseToken(ctx.withIndex(uint(len(sequenceNode.Values))), ctx.currentToken())
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser_test.go
index 6044699..8d697e8 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/parser_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/parser_test.go
@@ -7,6 +7,7 @@ import (
"strings"
"testing"
+ "github.com/goccy/go-yaml"
"github.com/goccy/go-yaml/ast"
"github.com/goccy/go-yaml/lexer"
"github.com/goccy/go-yaml/parser"
@@ -808,6 +809,125 @@ foo: > # comment
}
}
+func TestCommentWithNull(t *testing.T) {
+ t.Run("same line", func(t *testing.T) {
+ content := `
+foo:
+ bar: # comment
+ baz: 1
+`
+ expected := `
+foo:
+ bar: null # comment
+ baz: 1`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with same line comment")
+ }
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ })
+ t.Run("next line", func(t *testing.T) {
+ content := `
+foo:
+ bar:
+ # comment
+ baz: 1
+`
+ expected := `
+foo:
+ bar: null # comment
+ baz: 1`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with next line comment")
+ }
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ })
+ t.Run("next line and different indent", func(t *testing.T) {
+ content := `
+foo:
+ bar:
+ # comment
+baz: 1`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with next line comment")
+ }
+ expected := `
+foo:
+ bar: null # comment
+baz: 1`
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ })
+}
+
+func TestSequenceComment(t *testing.T) {
+ content := `
+foo:
+ - # comment
+ bar: 1
+baz:
+ - xxx
+`
+ f, err := parser.ParseBytes([]byte(content), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(f.Docs) != 1 {
+ t.Fatal("failed to parse content with next line with sequence")
+ }
+ expected := `
+foo:
+ # comment
+ - bar: 1
+baz:
+ - xxx`
+ if f.Docs[0].String() != strings.TrimPrefix(expected, "\n") {
+ t.Fatal("failed to parse comment")
+ }
+ t.Run("foo[0].bar", func(t *testing.T) {
+ path, err := yaml.PathString("$.foo[0].bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err := path.FilterFile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if v.String() != "1" {
+ t.Fatal("failed to get foo[0].bar value")
+ }
+ })
+ t.Run("baz[0]", func(t *testing.T) {
+ path, err := yaml.PathString("$.baz[0]")
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err := path.FilterFile(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if v.String() != "xxx" {
+ t.Fatal("failed to get baz[0] value")
+ }
+ })
+}
+
func TestNodePath(t *testing.T) {
yml := `
a: # commentA
@@ -824,6 +944,8 @@ a: # commentA
i: fuga # commentI
j: piyo # commentJ
k.l.m.n: moge # commentKLMN
+o#p: hogera # commentOP
+q#.r: hogehoge # commentQR
`
f, err := parser.ParseBytes([]byte(yml), parser.ParseComments)
if err != nil {
@@ -854,6 +976,8 @@ k.l.m.n: moge # commentKLMN
"$.a.i",
"$.j",
"$.'k.l.m.n'",
+ "$.o#p",
+ "$.'q#.r'",
}
if !reflect.DeepEqual(expectedPaths, commentPaths) {
t.Fatalf("failed to get YAMLPath to the comment node:\nexpected[%s]\ngot [%s]", expectedPaths, commentPaths)
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/cr.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/cr.yml
index 37b52a6..37b52a6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/cr.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/cr.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/crlf.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/crlf.yml
index 85929f9..85929f9 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/crlf.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/crlf.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/lf.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/lf.yml
index d2fe51f..d2fe51f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/parser/testdata/lf.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/parser/testdata/lf.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path.go
index 7a0c3b1..72554bd 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path.go
@@ -500,11 +500,29 @@ func newSelectorNode(selector string) *selectorNode {
}
func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
+ selector := n.selector
+ if len(selector) > 1 && selector[0] == '\'' && selector[len(selector)-1] == '\'' {
+ selector = selector[1 : len(selector)-1]
+ }
switch node.Type() {
case ast.MappingType:
for _, value := range node.(*ast.MappingNode).Values {
key := value.Key.GetToken().Value
- if key == n.selector {
+ if len(key) > 0 {
+ switch key[0] {
+ case '"':
+ var err error
+ key, err = strconv.Unquote(key)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to unquote")
+ }
+ case '\'':
+ if len(key) > 1 && key[len(key)-1] == '\'' {
+ key = key[1 : len(key)-1]
+ }
+ }
+ }
+ if key == selector {
if n.child == nil {
return value.Value, nil
}
@@ -518,7 +536,7 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) {
case ast.MappingValueType:
value := node.(*ast.MappingValueNode)
key := value.Key.GetToken().Value
- if key == n.selector {
+ if key == selector {
if n.child == nil {
return value.Value, nil
}
@@ -571,7 +589,9 @@ func (n *selectorNode) replace(node ast.Node, target ast.Node) error {
}
func (n *selectorNode) String() string {
- s := fmt.Sprintf(".%s", n.selector)
+ var builder PathBuilder
+ selector := builder.normalizeSelectorName(n.selector)
+ s := fmt.Sprintf(".%s", selector)
if n.child != nil {
s += n.child.String()
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path_test.go
index 4271da5..c0073ce 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/path_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/path_test.go
@@ -61,6 +61,8 @@ store:
bicycle:
color: red
price: 19.95
+ bicycle*unicycle:
+ price: 20.25
`
tests := []struct {
name string
@@ -97,6 +99,11 @@ store:
path: builder().Root().Child("store").Child("bicycle").Child("price").Build(),
expected: float64(19.95),
},
+ {
+ name: `$.store.'bicycle*unicycle'.price`,
+ path: builder().Root().Child("store").Child(`bicycle*unicycle`).Child("price").Build(),
+ expected: float64(20.25),
+ },
}
t.Run("PathString", func(t *testing.T) {
for _, test := range tests {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer.go
index d5e25dc..d5e25dc 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer_test.go
index 2afa74f..2afa74f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/printer/printer_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/printer/printer_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/context.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/context.go
index 09d0a2d..3aaec56 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/context.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/context.go
@@ -196,9 +196,16 @@ func (c *Context) existsBuffer() bool {
func (c *Context) bufferedSrc() []rune {
src := c.buf[:c.notSpaceCharPos]
- if len(src) > 0 && src[len(src)-1] == '\n' && c.isDocument() && c.literalOpt == "-" {
- // remove end '\n' character
- src = src[:len(src)-1]
+ if c.isDocument() && c.literalOpt == "-" {
+ // remove end '\n' character and trailing empty lines
+ // https://yaml.org/spec/1.2.2/#8112-block-chomping-indicator
+ for {
+ if len(src) > 0 && src[len(src)-1] == '\n' {
+ src = src[:len(src)-1]
+ continue
+ }
+ break
+ }
}
return src
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/scanner.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/scanner.go
index ce9c665..b0eac48 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/scanner/scanner.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/scanner/scanner.go
@@ -339,6 +339,11 @@ func (s *Scanner) scanDoubleQuote(ctx *Context) (tk *token.Token, pos int) {
value = append(value, '\n')
idx++
continue
+ case 'r':
+ ctx.addOriginBuf(nextChar)
+ value = append(value, '\r')
+ idx++
+ continue
case 'v':
ctx.addOriginBuf(nextChar)
value = append(value, '\v')
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/stdlib_quote.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/stdlib_quote.go
index be50ae6..be50ae6 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/stdlib_quote.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/stdlib_quote.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/struct.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/struct.go
index a3da8dd..a3da8dd 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/struct.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/struct.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/testdata/anchor.yml b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/testdata/anchor.yml
index c016c7f..c016c7f 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/testdata/anchor.yml
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/testdata/anchor.yml
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token.go
index 182f4be..c86caab 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token.go
@@ -623,12 +623,12 @@ func IsNeedQuoted(value string) bool {
}
first := value[0]
switch first {
- case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@':
+ case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ':
return true
}
last := value[len(value)-1]
switch last {
- case ':':
+ case ':', ' ':
return true
}
if looksLikeTimeValue(value) {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token_test.go
index 4f5764f..dc1b4df 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/token/token_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/token/token_test.go
@@ -118,6 +118,9 @@ func TestIsNeedQuoted(t *testing.T) {
"Off",
"OFF",
"@test",
+ " a",
+ " a ",
+ "a ",
}
for i, test := range needQuotedTests {
if !token.IsNeedQuoted(test) {
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate.go
index 20a2d6d..20a2d6d 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate_test.go
index 265deb8..265deb8 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/validate_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/validate_test.go
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml.go
index 2e541d8..25b1056 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml.go
@@ -89,43 +89,42 @@ func (s MapSlice) ToMap() map[interface{}]interface{} {
//
// The field tag format accepted is:
//
-// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
-// omitempty Only include the field if it's not set to the zero
-// value for the type or to empty slices or maps.
-// Zero valued structs will be omitted if all their public
-// fields are zero, unless they implement an IsZero
-// method (see the IsZeroer interface type), in which
-// case the field will be included if that method returns true.
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be included if that method returns true.
//
-// flow Marshal using a flow style (useful for structs,
-// sequences and maps).
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
//
-// inline Inline the field, which must be a struct or a map,
-// causing all of its fields or keys to be processed as if
-// they were part of the outer struct. For maps, keys must
-// not conflict with the yaml keys of other struct fields.
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
//
-// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style.
-// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name
+// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style.
+// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name
//
-// alias Marshal with alias. If want to define alias name explicitly, use alias=name style.
-// Otherwise, If omitted alias name and the field type is pointer type,
-// assigned anchor name automatically from same pointer address.
+// alias Marshal with alias. If want to define alias name explicitly, use alias=name style.
+// Otherwise, If omitted alias name and the field type is pointer type,
+// assigned anchor name automatically from same pointer address.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
func Marshal(v interface{}) ([]byte, error) {
return MarshalWithOptions(v)
}
@@ -167,16 +166,15 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) {
//
// For example:
//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// var t T
-// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
-//
func Unmarshal(data []byte, v interface{}) error {
return UnmarshalWithOptions(data, v)
}
diff --git a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml_test.go b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml_test.go
index 5828629..4446d31 100644
--- a/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.0/yaml_test.go
+++ b/dependencies/pkg/mod/github.com/goccy/go-yaml@v1.11.3/yaml_test.go
@@ -2,6 +2,7 @@ package yaml_test
import (
"bytes"
+ "strings"
"testing"
"github.com/google/go-cmp/cmp"
@@ -1161,6 +1162,94 @@ hoge:
})
}
+func TestCommentMapRoundTrip(t *testing.T) {
+ // test that an unmarshal and marshal round trip retains comments.
+ // if expect is empty, the test will use the input as the expected result.
+ tests := []struct {
+ name string
+ source string
+ expect string
+ encodeOptions []yaml.EncodeOption
+ }{
+ {
+ name: "simple map",
+ source: `
+# head
+a: 1 # line
+# foot
+`,
+ },
+ {
+ name: "nesting",
+ source: `
+- 1 # one
+- foo:
+ a: b
+ # c comment
+ c: d # d comment
+ "e#f": g # g comment
+ h.i: j # j comment
+ "k.#l": m # m comment
+`,
+ },
+ {
+ name: "single quotes",
+ source: `'a#b': c # c comment`,
+ encodeOptions: []yaml.EncodeOption{yaml.UseSingleQuote(true)},
+ },
+ {
+ name: "single quotes added in encode",
+ source: `a#b: c # c comment`,
+ encodeOptions: []yaml.EncodeOption{yaml.UseSingleQuote(true)},
+ expect: `'a#b': c # c comment`,
+ },
+ {
+ name: "double quotes quotes transformed to single quotes",
+ source: `"a#b": c # c comment`,
+ encodeOptions: []yaml.EncodeOption{yaml.UseSingleQuote(true)},
+ expect: `'a#b': c # c comment`,
+ },
+ {
+ name: "single quotes quotes transformed to double quotes",
+ source: `'a#b': c # c comment`,
+ expect: `"a#b": c # c comment`,
+ },
+ {
+ name: "single quotes removed",
+ source: `'a': b # b comment`,
+ expect: `a: b # b comment`,
+ },
+ {
+ name: "double quotes removed",
+ source: `"a": b # b comment`,
+ expect: `a: b # b comment`,
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ var val any
+ cm := yaml.CommentMap{}
+ source := strings.TrimSpace(test.source)
+ if err := yaml.UnmarshalWithOptions([]byte(source), &val, yaml.CommentToMap(cm)); err != nil {
+ t.Fatalf("%+v", err)
+ }
+ marshaled, err := yaml.MarshalWithOptions(val, append(test.encodeOptions, yaml.WithComment(cm))...)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ got := strings.TrimSpace(string(marshaled))
+ expect := strings.TrimSpace(test.expect)
+ if expect == "" {
+ expect = source
+ }
+ if got != expect {
+ t.Fatalf("expected:\n%s\ngot:\n%s\n", expect, got)
+ }
+ })
+
+ }
+}
+
func TestRegisterCustomMarshaler(t *testing.T) {
type T struct {
Foo []byte `yaml:"foo"`
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml b/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml
deleted file mode 100644
index d8156a6..0000000
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md
deleted file mode 100644
index 04fdf09..0000000
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contribution to this project!
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-You may have already signed it for other Google projects.
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS
new file mode 100644
index 0000000..91a5fa3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS
@@ -0,0 +1,6 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+
+# For syntax help see:
+# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+* @google/go-uuid-contributors
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml
new file mode 100644
index 0000000..e1bcc3c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml
@@ -0,0 +1,2 @@
+handleGHRelease: true
+releaseType: go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml
new file mode 100644
index 0000000..37c0b5f
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml
@@ -0,0 +1,26 @@
+---
+name: apidiff
+on:
+ pull_request:
+ branches:
+ - master
+permissions:
+ contents: read
+jobs:
+ compat:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-go@v4
+ with:
+ go-version: 1.21
+ - run: go install golang.org/x/exp/cmd/apidiff@latest
+ - uses: actions/checkout@v3
+ with:
+ ref: master
+ - run: apidiff -w uuid.baseline .
+ - uses: actions/checkout@v3
+ with:
+ clean: false
+ - run: |
+ apidiff -incompatible uuid.baseline . > diff.txt
+ cat diff.txt && ! [ -s diff.txt ]
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml
new file mode 100644
index 0000000..138397c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml
@@ -0,0 +1,20 @@
+---
+name: tests
+on:
+ pull_request:
+ branches:
+ - master
+permissions:
+ contents: read
+jobs:
+ unit-tests:
+ strategy:
+ matrix:
+ go-version: [1.19, 1.20.x, 1.21]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version: ${{ matrix.go-version }}
+ - run: go test -v ./...
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md
new file mode 100644
index 0000000..a502fdc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTORS b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS
index b4bb97f..b4bb97f 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/CONTRIBUTORS
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/LICENSE b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE
index 5dc6826..5dc6826 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/LICENSE
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/README.md b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md
index f765a46..3e9a618 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/README.md
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/dce.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go
index fa820b9..fa820b9 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/dce.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/doc.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go
index 5b8a4b9..5b8a4b9 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/doc.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/go.mod b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod
index fc84cd7..fc84cd7 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/go.mod
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/hash.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go
index b404f4b..dc60082 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/hash.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go
@@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
+
+ // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
)
// NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/json_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go
index 245f91e..34241d5 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/json_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go
@@ -31,6 +31,57 @@ func TestJSON(t *testing.T) {
}
}
+func TestJSONUnmarshal(t *testing.T) {
+ type S struct {
+ ID1 UUID
+ ID2 UUID `json:"ID2,omitempty"`
+ }
+
+ testCases := map[string]struct {
+ data []byte
+ expectedError error
+ expectedResult UUID
+ }{
+ "success": {
+ data: []byte(`{"ID1": "f47ac10b-58cc-0372-8567-0e02b2c3d479"}`),
+ expectedError: nil,
+ expectedResult: testUUID,
+ },
+ "zero": {
+ data: []byte(`{"ID1": "00000000-0000-0000-0000-000000000000"}`),
+ expectedError: nil,
+ expectedResult: Nil,
+ },
+ "null": {
+ data: []byte(`{"ID1": null}`),
+ expectedError: nil,
+ expectedResult: Nil,
+ },
+ "empty": {
+ data: []byte(`{"ID1": ""}`),
+ expectedError: invalidLengthError{len: 0},
+ expectedResult: Nil,
+ },
+ "omitempty": {
+ data: []byte(`{"ID2": ""}`),
+ expectedError: invalidLengthError{len: 0},
+ expectedResult: Nil,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ var s S
+ if err := json.Unmarshal(tc.data, &s); err != tc.expectedError {
+ t.Errorf("unexpected error: got %v, want %v", err, tc.expectedError)
+ }
+ if !reflect.DeepEqual(s.ID1, tc.expectedResult) {
+ t.Errorf("got %#v, want %#v", s.ID1, tc.expectedResult)
+ }
+ })
+ }
+}
+
func BenchmarkUUID_MarshalJSON(b *testing.B) {
x := &struct {
UUID UUID `json:"uuid"`
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/marshal.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go
index 14bd340..14bd340 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/marshal.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go
index d651a2b..d651a2b 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_js.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go
index 24b78ed..b2a0bc8 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_js.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go
@@ -7,6 +7,6 @@
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_net.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go
index 0cbbcdd..0cbbcdd 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/node_net.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go
index d7fcbf2..d7fcbf2 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go
index c6e5e69..c6e5e69 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/null_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/seq_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go
index 4f6c549..4f6c549 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/seq_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go
index 2e02ec0..2e02ec0 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go
index 1803dfd..1803dfd 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/sql_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/time.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go
index e6ef06c..c351129 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/time.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time)
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
}
// ClockSequence returns the clock sequence encoded in uuid.
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/util.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go
index 5ea6c73..5ea6c73 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/util.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go
index a57207a..5232b48 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go
@@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
return ok
}
-// Parse decodes s into a UUID or returns an error. Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
@@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) {
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
@@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) {
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
@@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
+
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@@ -292,3 +351,15 @@ func DisableRandPool() {
poolMu.Lock()
poolPos = randPoolSize
}
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid_test.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go
index e98d0fe..c1c6001 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/uuid_test.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go
@@ -6,6 +6,7 @@ package uuid
import (
"bytes"
+ "errors"
"fmt"
"os"
"runtime"
@@ -73,6 +74,9 @@ var tests = []test{
{"f47ac10b58cc037285670e02b2c3d479", 0, RFC4122, true},
{"f47ac10b58cc037285670e02b2c3d4790", 0, Invalid, false},
{"f47ac10b58cc037285670e02b2c3d47", 0, Invalid, false},
+
+ {"01ee836c-e7c9-619d-929a-525400475911", 6, RFC4122, true},
+ {"018bd12c-58b0-7683-8a5b-8752d0e86651", 7, RFC4122, true},
}
var constants = []struct {
@@ -569,6 +573,67 @@ func TestIsWrongLength(t *testing.T) {
}
}
+func FuzzParse(f *testing.F) {
+ for _, tt := range tests {
+ f.Add(tt.in)
+ f.Add(strings.ToUpper(tt.in))
+ }
+ f.Fuzz(func(t *testing.T, in string) {
+ Parse(in)
+ })
+}
+
+func FuzzParseBytes(f *testing.F) {
+ for _, tt := range tests {
+ f.Add([]byte(tt.in))
+ }
+ f.Fuzz(func(t *testing.T, in []byte) {
+ ParseBytes(in)
+ })
+}
+
+func FuzzFromBytes(f *testing.F) {
+ // Copied from TestFromBytes.
+ f.Add([]byte{
+ 0x7d, 0x44, 0x48, 0x40,
+ 0x9d, 0xc0,
+ 0x11, 0xd1,
+ 0xb2, 0x45,
+ 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
+ })
+ f.Fuzz(func(t *testing.T, in []byte) {
+ FromBytes(in)
+ })
+}
+
+// TestValidate checks various scenarios for the Validate function
+func TestValidate(t *testing.T) {
+ testCases := []struct {
+ name string
+ input string
+ expect error
+ }{
+ {"Valid UUID", "123e4567-e89b-12d3-a456-426655440000", nil},
+ {"Valid UUID with URN", "urn:uuid:123e4567-e89b-12d3-a456-426655440000", nil},
+ {"Valid UUID with Braces", "{123e4567-e89b-12d3-a456-426655440000}", nil},
+ {"Valid UUID No Hyphens", "123e4567e89b12d3a456426655440000", nil},
+ {"Invalid UUID", "invalid-uuid", errors.New("invalid UUID length: 12")},
+ {"Invalid Length", "123", fmt.Errorf("invalid UUID length: %d", len("123"))},
+ {"Invalid URN Prefix", "urn:test:123e4567-e89b-12d3-a456-426655440000", fmt.Errorf("invalid urn prefix: %q", "urn:test:")},
+ {"Invalid Brackets", "[123e4567-e89b-12d3-a456-426655440000]", fmt.Errorf("invalid bracketed UUID format")},
+ {"Invalid UUID Format", "12345678gabc1234abcd1234abcd1234", fmt.Errorf("invalid UUID format")},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := Validate(tc.input)
+ if (err != nil) != (tc.expect != nil) || (err != nil && err.Error() != tc.expect.Error()) {
+ t.Errorf("Validate(%q) = %v, want %v", tc.input, err, tc.expect)
+ }
+ })
+ }
+}
+
var asString = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
var asBytes = []byte(asString)
@@ -700,3 +765,166 @@ func BenchmarkUUID_NewPooled(b *testing.B) {
}
})
}
+
+func BenchmarkUUIDs_Strings(b *testing.B) {
+ uuid1, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+ if err != nil {
+ b.Fatal(err)
+ }
+ uuid2, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
+ if err != nil {
+ b.Fatal(err)
+ }
+ uuids := UUIDs{uuid1, uuid2}
+ for i := 0; i < b.N; i++ {
+ uuids.Strings()
+ }
+}
+
+func TestVersion6(t *testing.T) {
+ uuid1, err := NewV6()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+ uuid2, err := NewV6()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+
+ if uuid1 == uuid2 {
+ t.Errorf("%s:duplicate uuid", uuid1)
+ }
+ if v := uuid1.Version(); v != 6 {
+ t.Errorf("%s: version %s expected 6", uuid1, v)
+ }
+ if v := uuid2.Version(); v != 6 {
+ t.Errorf("%s: version %s expected 6", uuid2, v)
+ }
+ n1 := uuid1.NodeID()
+ n2 := uuid2.NodeID()
+ if !bytes.Equal(n1, n2) {
+ t.Errorf("Different nodes %x != %x", n1, n2)
+ }
+ t1 := uuid1.Time()
+ t2 := uuid2.Time()
+ q1 := uuid1.ClockSequence()
+ q2 := uuid2.ClockSequence()
+
+ switch {
+ case t1 == t2 && q1 == q2:
+ t.Error("time stopped")
+ case t1 > t2 && q1 == q2:
+ t.Error("time reversed")
+ case t1 < t2 && q1 != q2:
+ t.Error("clock sequence changed unexpectedly")
+ }
+}
+
+// uuid v7 time is only unix milliseconds, so
+// uuid1.Time() == uuid2.Time() is right, but uuid1 must != uuid2
+func TestVersion7(t *testing.T) {
+ SetRand(nil)
+ m := make(map[string]bool)
+ for x := 1; x < 128; x++ {
+ uuid, err := NewV7()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+ s := uuid.String()
+ if m[s] {
+ t.Errorf("NewV7 returned duplicated UUID %s", s)
+ }
+ m[s] = true
+ if v := uuid.Version(); v != 7 {
+ t.Errorf("UUID of version %s", v)
+ }
+ if uuid.Variant() != RFC4122 {
+ t.Errorf("UUID is variant %d", uuid.Variant())
+ }
+ }
+}
+
+// uuid v7 time is only unix milliseconds, so
+// uuid1.Time() == uuid2.Time() is right, but uuid1 must != uuid2
+func TestVersion7_pooled(t *testing.T) {
+ SetRand(nil)
+ EnableRandPool()
+ defer DisableRandPool()
+
+ m := make(map[string]bool)
+ for x := 1; x < 128; x++ {
+ uuid, err := NewV7()
+ if err != nil {
+ t.Fatalf("could not create UUID: %v", err)
+ }
+ s := uuid.String()
+ if m[s] {
+ t.Errorf("NewV7 returned duplicated UUID %s", s)
+ }
+ m[s] = true
+ if v := uuid.Version(); v != 7 {
+ t.Errorf("UUID of version %s", v)
+ }
+ if uuid.Variant() != RFC4122 {
+ t.Errorf("UUID is variant %d", uuid.Variant())
+ }
+ }
+}
+
+func TestVersion7FromReader(t *testing.T) {
+ myString := "8059ddhdle77cb52"
+ r := bytes.NewReader([]byte(myString))
+ _, err := NewV7FromReader(r)
+ if err != nil {
+ t.Errorf("failed generating UUID from a reader")
+ }
+ _, err = NewV7FromReader(r)
+ if err == nil {
+ t.Errorf("expecting an error as reader has no more bytes. Got uuid. NewV7FromReader may not be using the provided reader")
+ }
+}
+
+func TestVersion7Monotonicity(t *testing.T) {
+ length := 10000
+ u1 := Must(NewV7()).String()
+ for i := 0; i < length; i++ {
+ u2 := Must(NewV7()).String()
+ if u2 <= u1 {
+ t.Errorf("monotonicity failed at #%d: %s(next) < %s(before)", i, u2, u1)
+ break
+ }
+ u1 = u2
+ }
+}
+
+type fakeRand struct{}
+
+func (g fakeRand) Read(bs []byte) (int, error) {
+ for i, _ := range bs {
+ bs[i] = 0x88
+ }
+ return len(bs), nil
+}
+
+func TestVersion7MonotonicityStrict(t *testing.T) {
+ timeNow = func() time.Time {
+ return time.Date(2008, 8, 8, 8, 8, 8, 8, time.UTC)
+ }
+ defer func() {
+ timeNow = time.Now
+ }()
+
+ SetRand(fakeRand{})
+ defer SetRand(nil)
+
+ length := 100000 // > 3906
+ u1 := Must(NewV7()).String()
+ for i := 0; i < length; i++ {
+ u2 := Must(NewV7()).String()
+ if u2 <= u1 {
+ t.Errorf("monotonicity failed at #%d: %s(next) < %s(before)", i, u2, u1)
+ break
+ }
+ u1 = u2
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version1.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go
index 4631096..4631096 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version1.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version4.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go
index 7697802..7697802 100644
--- a/dependencies/pkg/mod/github.com/google/uuid@v1.3.0/version4.go
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
+// As well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch).
+// Uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch).
+// it use NewRandomFromReader fill random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
+// uuid[8] already has the right version number (Variant is 10)
+// see function NewV7 and NewV7FromReader
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guarenteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.codecov.yml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.codecov.yml
index 35cde5c..35cde5c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.codecov.yml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.codecov.yml
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/FUNDING.yml
index 8f80494..8f80494 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/FUNDING.yml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/FUNDING.yml
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/cifuzz.yaml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/cifuzz.yaml
index e198c52..e198c52 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/cifuzz.yaml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/cifuzz.yaml
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/docker.yaml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/docker.yaml
index 83faeb6..7983de4 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/docker.yaml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/docker.yaml
@@ -17,6 +17,5 @@ jobs:
- name: Run example - simple
run: |
- cd ./_example/simple
- docker build -t simple .
+ docker build -t simple -f ./_example/simple/Dockerfile .
docker run simple | grep 99\ こんにちは世界099
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/go.yaml b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/go.yaml
index 923274b..c96bf31 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.github/workflows/go.yaml
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.github/workflows/go.yaml
@@ -14,7 +14,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
- go: ['1.18', '1.19', '1.20']
+ go: ['1.19', '1.20', '1.21']
fail-fast: false
env:
OS: ${{ matrix.os }}
@@ -64,7 +64,7 @@ jobs:
strategy:
matrix:
- go: ['1.18', '1.19', '1.20']
+ go: ['1.19', '1.20', '1.21']
fail-fast: false
env:
OS: windows-latest
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.gitignore b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.gitignore
index fa0e6b5..fa0e6b5 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/.gitignore
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/.gitignore
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/LICENSE b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/LICENSE
index ca458bb..ca458bb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/LICENSE
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/LICENSE
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/README.md b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/README.md
index 1804a89..1804a89 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/README.md
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/README.md
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/Makefile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/Makefile
index 91fcde6..91fcde6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/Makefile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/Makefile
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/main.go
index 3148cae..3148cae 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_driver_name/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_driver_name/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_func/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_func/main.go
index 85657e6..85657e6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/custom_func/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/custom_func/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/fuzz/fuzz_openexec.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/fuzz/fuzz_openexec.go
index 5326044..5326044 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/fuzz/fuzz_openexec.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/fuzz/fuzz_openexec.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/hook/hook.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/hook/hook.go
index 6023181..6023181 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/hook/hook.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/hook/hook.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go
new file mode 100644
index 0000000..181934b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/json/json.go
@@ -0,0 +1,81 @@
+package main
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ _ "github.com/mattn/go-sqlite3"
+ "log"
+ "os"
+)
+
+type Tag struct {
+ Name string `json:"name"`
+ Country string `json:"country"`
+}
+
+func (t *Tag) Scan(value interface{}) error {
+ return json.Unmarshal([]byte(value.(string)), t)
+}
+
+func (t *Tag) Value() (driver.Value, error) {
+ b, err := json.Marshal(t)
+ return string(b), err
+}
+
+func main() {
+ os.Remove("./foo.db")
+
+ db, err := sql.Open("sqlite3", "./foo.db")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ _, err = db.Exec(`create table foo (tag jsonb)`)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ stmt, err := db.Prepare("insert into foo(tag) values(?)")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer stmt.Close()
+ _, err = stmt.Exec(`{"name": "mattn", "country": "japan"}`)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = stmt.Exec(`{"name": "michael", "country": "usa"}`)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var country string
+ err = db.QueryRow("select tag->>'country' from foo where tag->>'name' = 'mattn'").Scan(&country)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(country)
+
+ var tag Tag
+ err = db.QueryRow("select tag from foo where tag->>'name' = 'mattn'").Scan(&tag)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(tag.Name)
+
+ tag.Country = "日本"
+ _, err = db.Exec(`update foo set tag = ? where tag->>'name' == 'mattn'`, &tag)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = db.QueryRow("select tag->>'country' from foo where tag->>'name' = 'mattn'").Scan(&country)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(country)
+}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/limit/limit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/limit/limit.go
index bcba819..c1adfe8 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/limit/limit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/limit/limit.go
@@ -10,9 +10,9 @@ import (
"github.com/mattn/go-sqlite3"
)
-func createBulkInsertQuery(n int, start int) (query string, args []interface{}) {
+func createBulkInsertQuery(n int, start int) (query string, args []any) {
values := make([]string, n)
- args = make([]interface{}, n*2)
+ args = make([]any, n*2)
pos := 0
for i := 0; i < n; i++ {
values[i] = "(?, ?)"
@@ -27,7 +27,7 @@ func createBulkInsertQuery(n int, start int) (query string, args []interface{})
return
}
-func bulkInsert(db *sql.DB, query string, args []interface{}) (err error) {
+func bulkInsert(db *sql.DB, query string, args []any) (err error) {
stmt, err := db.Prepare(query)
if err != nil {
return
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/Makefile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/Makefile
index 1ef69a6..1ef69a6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/Makefile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/Makefile
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/extension.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/extension.go
index 61ceb55..61ceb55 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/extension.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/extension.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/sqlite3_mod_regexp.c b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/sqlite3_mod_regexp.c
index d3ad149..d3ad149 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_regexp/sqlite3_mod_regexp.c
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_regexp/sqlite3_mod_regexp.c
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/Makefile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/Makefile
index f65a004..f65a004 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/Makefile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/Makefile
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/extension.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/extension.go
index f738af6..f738af6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/extension.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/extension.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/picojson.h b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/picojson.h
index 2142647..2142647 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/picojson.h
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/picojson.h
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/sqlite3_mod_vtable.cc b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/sqlite3_mod_vtable.cc
index 4caf484..4caf484 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/mod_vtable/sqlite3_mod_vtable.cc
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/mod_vtable/sqlite3_mod_vtable.cc
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/Dockerfile b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/Dockerfile
index c19f6e6..8ed0473 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/Dockerfile
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/Dockerfile
@@ -9,7 +9,7 @@
# -----------------------------------------------------------------------------
# Build Stage
# -----------------------------------------------------------------------------
-FROM golang:alpine AS build
+FROM golang:alpine3.18 AS build
# Important:
# Because this is a CGO enabled package, you are required to set it as 1.
@@ -26,7 +26,9 @@ WORKDIR /workspace
COPY . /workspace/
RUN \
+ cd _example/simple && \
go mod init github.com/mattn/sample && \
+ go mod edit -replace=github.com/mattn/go-sqlite3=../.. && \
go mod tidy && \
go install -ldflags='-s -w -extldflags "-static"' ./simple.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/simple.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/simple.go
index 0c34791..0c34791 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/simple/simple.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/simple/simple.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/trace/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/trace/main.go
index bef3d15..bef3d15 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/trace/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/trace/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/main.go
index aad8dda..aad8dda 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/vtable.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/vtable.go
index 10d12a9..c65535b 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable/vtable.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable/vtable.go
@@ -93,7 +93,7 @@ func (vc *ghRepoCursor) Column(c *sqlite3.SQLiteContext, col int) error {
return nil
}
-func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []any) error {
vc.index = 0
return nil
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/main.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/main.go
index 17b58af..17b58af 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/main.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/main.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/vtable.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/vtable.go
index 49fc0b7..9f22ebc 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/_example/vtable_eponymous_only/vtable.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/_example/vtable_eponymous_only/vtable.go
@@ -77,7 +77,7 @@ func (vc *seriesCursor) Column(c *sqlite3.SQLiteContext, col int) error {
return nil
}
-func (vc *seriesCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *seriesCursor) Filter(idxNum int, idxStr string, vals []any) error {
switch {
case len(vals) < 1:
vc.seriesTable.start = 0
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup.go
index ecbb469..ecbb469 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup_test.go
index 6d857de..b3ad0b5 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/backup_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/backup_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback.go
index d305691..b794bcd 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback.go
@@ -100,13 +100,13 @@ func preUpdateHookTrampoline(handle unsafe.Pointer, dbHandle uintptr, op int, db
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
db *SQLiteConn
- val interface{}
+ val any
}
var handleLock sync.Mutex
var handleVals = make(map[unsafe.Pointer]handleVal)
-func newHandle(db *SQLiteConn, v interface{}) unsafe.Pointer {
+func newHandle(db *SQLiteConn, v any) unsafe.Pointer {
handleLock.Lock()
defer handleLock.Unlock()
val := handleVal{db: db, val: v}
@@ -124,7 +124,7 @@ func lookupHandleVal(handle unsafe.Pointer) handleVal {
return handleVals[handle]
}
-func lookupHandle(handle unsafe.Pointer) interface{} {
+func lookupHandle(handle unsafe.Pointer) any {
return lookupHandleVal(handle).val
}
@@ -238,7 +238,7 @@ func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
switch typ.Kind() {
case reflect.Interface:
if typ.NumMethod() != 0 {
- return nil, errors.New("the only supported interface type is interface{}")
+ return nil, errors.New("the only supported interface type is any")
}
return callbackArgGeneric, nil
case reflect.Slice:
@@ -360,11 +360,11 @@ func callbackRetGeneric(ctx *C.sqlite3_context, v reflect.Value) error {
}
cb, err := callbackRet(v.Elem().Type())
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
+ }
- return cb(ctx, v.Elem())
+ return cb(ctx, v.Elem())
}
func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback_test.go
index b09122a..8163f2f 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/callback_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/callback_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
@@ -53,7 +54,7 @@ func TestCallbackArgCast(t *testing.T) {
func TestCallbackConverters(t *testing.T) {
tests := []struct {
- v interface{}
+ v any
err bool
}{
// Unfortunately, we can't tell which converter was returned,
@@ -104,7 +105,7 @@ func TestCallbackConverters(t *testing.T) {
}
func TestCallbackReturnAny(t *testing.T) {
- udf := func() interface{} {
+ udf := func() any {
return 1
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/convert.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/convert.go
index 0385073..f7a9dcd 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/convert.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/convert.go
@@ -23,7 +23,7 @@ var errNilPtr = errors.New("destination pointer is nil") // embedded in descript
// convertAssign copies to dest the value in src, converting it if possible.
// An error is returned if the copy would result in loss of information.
// dest should be a pointer type.
-func convertAssign(dest, src interface{}) error {
+func convertAssign(dest, src any) error {
// Common cases, without reflect.
switch s := src.(type) {
case string:
@@ -55,7 +55,7 @@ func convertAssign(dest, src interface{}) error {
}
*d = string(s)
return nil
- case *interface{}:
+ case *any:
if d == nil {
return errNilPtr
}
@@ -97,7 +97,7 @@ func convertAssign(dest, src interface{}) error {
}
case nil:
switch d := dest.(type) {
- case *interface{}:
+ case *any:
if d == nil {
return errNilPtr
}
@@ -149,7 +149,7 @@ func convertAssign(dest, src interface{}) error {
*d = bv.(bool)
}
return err
- case *interface{}:
+ case *any:
*d = src
return nil
}
@@ -256,7 +256,7 @@ func cloneBytes(b []byte) []byte {
return c
}
-func asString(src interface{}) string {
+func asString(src any) string {
switch v := src.(type) {
case string:
return v
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/doc.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/doc.go
index ac27633..a3bcebb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/doc.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/doc.go
@@ -5,63 +5,63 @@ This works as a driver for database/sql.
Installation
- go get github.com/mattn/go-sqlite3
+ go get github.com/mattn/go-sqlite3
-Supported Types
+# Supported Types
Currently, go-sqlite3 supports the following data types.
- +------------------------------+
- |go | sqlite3 |
- |----------|-------------------|
- |nil | null |
- |int | integer |
- |int64 | integer |
- |float64 | float |
- |bool | integer |
- |[]byte | blob |
- |string | text |
- |time.Time | timestamp/datetime|
- +------------------------------+
-
-SQLite3 Extension
+ +------------------------------+
+ |go | sqlite3 |
+ |----------|-------------------|
+ |nil | null |
+ |int | integer |
+ |int64 | integer |
+ |float64 | float |
+ |bool | integer |
+ |[]byte | blob |
+ |string | text |
+ |time.Time | timestamp/datetime|
+ +------------------------------+
+
+# SQLite3 Extension
You can write your own extension module for sqlite3. For example, below is an
extension for a Regexp matcher operation.
- #include <pcre.h>
- #include <string.h>
- #include <stdio.h>
- #include <sqlite3ext.h>
-
- SQLITE_EXTENSION_INIT1
- static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
- if (argc >= 2) {
- const char *target = (const char *)sqlite3_value_text(argv[1]);
- const char *pattern = (const char *)sqlite3_value_text(argv[0]);
- const char* errstr = NULL;
- int erroff = 0;
- int vec[500];
- int n, rc;
- pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
- rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
- if (rc <= 0) {
- sqlite3_result_error(context, errstr, 0);
- return;
- }
- sqlite3_result_int(context, 1);
- }
- }
-
- #ifdef _WIN32
- __declspec(dllexport)
- #endif
- int sqlite3_extension_init(sqlite3 *db, char **errmsg,
- const sqlite3_api_routines *api) {
- SQLITE_EXTENSION_INIT2(api);
- return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
- (void*)db, regexp_func, NULL, NULL);
- }
+ #include <pcre.h>
+ #include <string.h>
+ #include <stdio.h>
+ #include <sqlite3ext.h>
+
+ SQLITE_EXTENSION_INIT1
+ static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
+ if (argc >= 2) {
+ const char *target = (const char *)sqlite3_value_text(argv[1]);
+ const char *pattern = (const char *)sqlite3_value_text(argv[0]);
+ const char* errstr = NULL;
+ int erroff = 0;
+ int vec[500];
+ int n, rc;
+ pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
+ rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
+ if (rc <= 0) {
+ sqlite3_result_error(context, errstr, 0);
+ return;
+ }
+ sqlite3_result_int(context, 1);
+ }
+ }
+
+ #ifdef _WIN32
+ __declspec(dllexport)
+ #endif
+ int sqlite3_extension_init(sqlite3 *db, char **errmsg,
+ const sqlite3_api_routines *api) {
+ SQLITE_EXTENSION_INIT2(api);
+ return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
+ (void*)db, regexp_func, NULL, NULL);
+ }
It needs to be built as a so/dll shared library. And you need to register
the extension module like below.
@@ -77,7 +77,7 @@ Then, you can use this extension.
rows, err := db.Query("select text from mytable where name regexp '^golang'")
-Connection Hook
+# Connection Hook
You can hook and inject your code when the connection is established by setting
ConnectHook to get the SQLiteConn.
@@ -95,13 +95,13 @@ You can also use database/sql.Conn.Raw (Go >= 1.13):
conn, err := db.Conn(context.Background())
// if err != nil { ... }
defer conn.Close()
- err = conn.Raw(func (driverConn interface{}) error {
+ err = conn.Raw(func (driverConn any) error {
sqliteConn := driverConn.(*sqlite3.SQLiteConn)
// ... use sqliteConn
})
// if err != nil { ... }
-Go SQlite3 Extensions
+# Go SQlite3 Extensions
If you want to register Go functions as SQLite extension functions
you can make a custom driver by calling RegisterFunction from
@@ -130,6 +130,5 @@ You can then use the custom driver by passing its name to sql.Open.
}
See the documentation of RegisterFunc for more details.
-
*/
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error.go
index 58ab252..58ab252 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error_test.go
index 3cfad06..0ff14c1 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/error_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/error_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.mod b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.mod
index 89788ab..e342dcc 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.mod
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.mod
@@ -1,6 +1,6 @@
module github.com/mattn/go-sqlite3
-go 1.16
+go 1.19
retract (
[v2.0.0+incompatible, v2.0.6+incompatible] // Accidental; no major changes or features.
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.sum b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum
index e69de29..e69de29 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/go.sum
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/go.sum
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.c b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.c
index a1d6a28..53d7560 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.c
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.c
@@ -1,7 +1,7 @@
#ifndef USE_LIBSQLITE3
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.42.0. By combining all the individual C code files into this
+** version 3.45.1. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -17,6 +17,9 @@
** if you want a wrapper to interface SQLite with your choice of programming
** language. The code for the "sqlite3" command-line shell is also in a
** separate file. This file contains only code for the core SQLite library.
+**
+** The content in this amalgamation comes from Fossil check-in
+** e876e51a0ed5c5b3126f52e532044363a014.
*/
#define SQLITE_CORE 1
#define SQLITE_AMALGAMATION 1
@@ -51,11 +54,11 @@
** used on lines of code that actually
** implement parts of coverage testing.
**
-** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false
+** OPTIMIZATION-IF-TRUE - This branch is allowed to always be false
** and the correct answer is still obtained,
** though perhaps more slowly.
**
-** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true
+** OPTIMIZATION-IF-FALSE - This branch is allowed to always be true
** and the correct answer is still obtained,
** though perhaps more slowly.
**
@@ -457,9 +460,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.42.0"
-#define SQLITE_VERSION_NUMBER 3042000
-#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0"
+#define SQLITE_VERSION "3.45.1"
+#define SQLITE_VERSION_NUMBER 3045001
+#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -839,6 +842,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8))
+#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
@@ -1501,7 +1505,7 @@ struct sqlite3_io_methods {
** by clients within the current process, only within other processes.
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
-** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use interally by the
+** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
@@ -2437,7 +2441,7 @@ struct sqlite3_mem_methods {
** is stored in each sorted record and the required column values loaded
** from the database as records are returned in sorted order. The default
** value for this option is to never use this optimization. Specifying a
-** negative value for this option restores the default behaviour.
+** negative value for this option restores the default behavior.
** This option is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
**
@@ -2612,7 +2616,7 @@ struct sqlite3_mem_methods {
** database handle, SQLite checks if this will mean that there are now no
** connections at all to the database. If so, it performs a checkpoint
** operation before closing the connection. This option may be used to
-** override this behaviour. The first parameter passed to this operation
+** override this behavior. The first parameter passed to this operation
** is an integer - positive to disable checkpoints-on-close, or zero (the
** default) to enable them, and negative to leave the setting unchanged.
** The second parameter is a pointer to an integer
@@ -2765,7 +2769,7 @@ struct sqlite3_mem_methods {
** the [VACUUM] command will fail with an obscure error when attempting to
** process a table with generated columns and a descending index. This is
** not considered a bug since SQLite versions 3.3.0 and earlier do not support
-** either generated columns or decending indexes.
+** either generated columns or descending indexes.
** </dd>
**
** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]]
@@ -3046,6 +3050,7 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
**
** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
** or not an interrupt is currently in effect for [database connection] D.
+** It returns 1 if an interrupt is currently in effect, or 0 otherwise.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
SQLITE_API int sqlite3_is_interrupted(sqlite3*);
@@ -3699,8 +3704,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** M argument should be the bitwise OR-ed combination of
** zero or more [SQLITE_TRACE] constants.
**
-** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
-** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
+** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P)
+** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or
+** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each
+** database connection may have at most one trace callback.
**
** ^The X callback is invoked whenever any of the events identified by
** mask M occur. ^The integer return value from the callback is currently
@@ -4069,7 +4076,7 @@ SQLITE_API int sqlite3_open_v2(
** as F) must be one of:
** <ul>
** <li> A database filename pointer created by the SQLite core and
-** passed into the xOpen() method of a VFS implemention, or
+** passed into the xOpen() method of a VFS implementation, or
** <li> A filename obtained from [sqlite3_db_filename()], or
** <li> A new filename constructed using [sqlite3_create_filename()].
** </ul>
@@ -4182,7 +4189,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
/*
** CAPI3REF: Create and Destroy VFS Filenames
**
-** These interfces are provided for use by [VFS shim] implementations and
+** These interfaces are provided for use by [VFS shim] implementations and
** are not useful outside of that context.
**
** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
@@ -4261,14 +4268,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename);
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
-** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** text that describes the error, as either UTF-8 or UTF-16 respectively,
+** or NULL if no error message is available.
+** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
** subsequent calls to other SQLite interface functions.)^
**
-** ^The sqlite3_errstr() interface returns the English-language text
-** that describes the [result code], as UTF-8.
+** ^The sqlite3_errstr(E) interface returns the English-language text
+** that describes the [result code] E, as UTF-8, or NULL if E is not an
+** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally
** and must not be freed by the application)^.
**
@@ -4730,6 +4740,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
/*
+** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN
+** setting for [prepared statement] S. If E is zero, then S becomes
+** a normal prepared statement. If E is 1, then S behaves as if
+** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if
+** its SQL text began with "[EXPLAIN QUERY PLAN]".
+**
+** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared.
+** SQLite tries to avoid a reprepare, but a reprepare might be necessary
+** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode.
+**
+** Because of the potential need to reprepare, a call to
+** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be
+** reprepared because it was created using [sqlite3_prepare()] instead of
+** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and
+** hence has no saved SQL text with which to reprepare.
+**
+** Changing the explain setting for a prepared statement does not change
+** the original SQL text for the statement. Hence, if the SQL text originally
+** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0)
+** is called to convert the statement into an ordinary statement, the EXPLAIN
+** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S)
+** output, even though the statement now acts like a normal SQL statement.
+**
+** This routine returns SQLITE_OK if the explain mode is successfully
+** changed, or an error code if the explain mode could not be changed.
+** The explain mode cannot be changed while a statement is active.
+** Hence, it is good practice to call [sqlite3_reset(S)]
+** immediately prior to calling sqlite3_stmt_explain(S,E).
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode);
+
+/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
**
@@ -4892,7 +4937,7 @@ typedef struct sqlite3_context sqlite3_context;
** with it may be passed. ^It is called to dispose of the BLOB or string even
** if the call to the bind API fails, except the destructor is not called if
** the third parameter is a NULL pointer or the fourth parameter is negative.
-** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that
+** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that
** the application remains responsible for disposing of the object. ^In this
** case, the object and the provided pointer to it must remain valid until
** either the prepared statement is finalized or the same SQL parameter is
@@ -5571,20 +5616,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
** back to the beginning of its program.
**
-** ^If the most recent call to [sqlite3_step(S)] for the
-** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
-** or if [sqlite3_step(S)] has never before been called on S,
-** then [sqlite3_reset(S)] returns [SQLITE_OK].
+** ^The return code from [sqlite3_reset(S)] indicates whether or not
+** the previous evaluation of prepared statement S completed successfully.
+** ^If [sqlite3_step(S)] has never before been called on S or if
+** [sqlite3_step(S)] has not been called since the previous call
+** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return
+** [SQLITE_OK].
**
** ^If the most recent call to [sqlite3_step(S)] for the
** [prepared statement] S indicated an error, then
** [sqlite3_reset(S)] returns an appropriate [error code].
+** ^The [sqlite3_reset(S)] interface might also return an [error code]
+** if there were no prior errors but the process of resetting
+** the prepared statement caused a new error. ^For example, if an
+** [INSERT] statement with a [RETURNING] clause is only stepped one time,
+** that one call to [sqlite3_step(S)] might return SQLITE_ROW but
+** the overall statement might still fail and the [sqlite3_reset(S)] call
+** might return SQLITE_BUSY if locking constraints prevent the
+** database change from committing. Therefore, it is important that
+** applications check the return code from [sqlite3_reset(S)] even if
+** no prior call to [sqlite3_step(S)] indicated a problem.
**
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
+
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
@@ -5795,7 +5853,7 @@ SQLITE_API int sqlite3_create_window_function(
** [application-defined SQL function]
** that has side-effects or that could potentially leak sensitive information.
** This will prevent attacks in which an application is tricked
-** into using a database file that has had its schema surreptiously
+** into using a database file that has had its schema surreptitiously
** modified to invoke the application-defined function in ways that are
** harmful.
** <p>
@@ -5831,13 +5889,27 @@ SQLITE_API int sqlite3_create_window_function(
** </dd>
**
** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call
** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
-** Specifying this flag makes no difference for scalar or aggregate user
-** functions. However, if it is not specified for a user-defined window
-** function, then any sub-types belonging to arguments passed to the window
-** function may be discarded before the window function is called (i.e.
-** sqlite3_value_subtype() will always return 0).
+** This flag instructs SQLite to omit some corner-case optimizations that
+** might disrupt the operation of the [sqlite3_value_subtype()] function,
+** causing it to return zero rather than the correct subtype().
+** SQL functions that invokes [sqlite3_value_subtype()] should have this
+** property. If the SQLITE_SUBTYPE property is omitted, then the return
+** value from [sqlite3_value_subtype()] might sometimes be zero even though
+** a non-zero subtype was specified by the function argument expression.
+**
+** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd>
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call
+** [sqlite3_result_subtype()] to cause a sub-type to be associated with its
+** result.
+** Every function that invokes [sqlite3_result_subtype()] should have this
+** property. If it does not, then the call to [sqlite3_result_subtype()]
+** might become a no-op if the function is used as term in an
+** [expression index]. On the other hand, SQL functions that never invoke
+** [sqlite3_result_subtype()] should avoid setting this property, as the
+** purpose of this property is to disable certain optimizations that are
+** incompatible with subtypes.
** </dd>
** </dl>
*/
@@ -5845,6 +5917,7 @@ SQLITE_API int sqlite3_create_window_function(
#define SQLITE_DIRECTONLY 0x000080000
#define SQLITE_SUBTYPE 0x000100000
#define SQLITE_INNOCUOUS 0x000200000
+#define SQLITE_RESULT_SUBTYPE 0x001000000
/*
** CAPI3REF: Deprecated Functions
@@ -6041,6 +6114,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
** information can be used to pass a limited amount of context from
** one SQL function to another. Use the [sqlite3_result_subtype()]
** routine to set the subtype for the return value of an SQL function.
+**
+** Every [application-defined SQL function] that invoke this interface
+** should include the [SQLITE_SUBTYPE] property in the text
+** encoding argument when the function is [sqlite3_create_function|registered].
+** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype()
+** might return zero instead of the upstream subtype in some corner cases.
*/
SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
@@ -6139,48 +6218,56 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
-** associate metadata with argument values. If the same value is passed to
-** multiple invocations of the same SQL function during query execution, under
-** some circumstances the associated metadata may be preserved. An example
-** of where this might be useful is in a regular-expression matching
-** function. The compiled version of the regular expression can be stored as
-** metadata associated with the pattern string.
+** associate auxiliary data with argument values. If the same argument
+** value is passed to multiple invocations of the same SQL function during
+** query execution, under some circumstances the associated auxiliary data
+** might be preserved. An example of where this might be useful is in a
+** regular-expression matching function. The compiled version of the regular
+** expression can be stored as auxiliary data associated with the pattern string.
** Then as long as the pattern string remains the same,
** the compiled regular expression can be reused on multiple
** invocations of the same function.
**
-** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
+** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data
** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
** value to the application-defined function. ^N is zero for the left-most
-** function argument. ^If there is no metadata
+** function argument. ^If there is no auxiliary data
** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
** returns a NULL pointer.
**
-** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
-** argument of the application-defined function. ^Subsequent
+** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the
+** N-th argument of the application-defined function. ^Subsequent
** calls to sqlite3_get_auxdata(C,N) return P from the most recent
-** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
-** NULL if the metadata has been discarded.
+** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or
+** NULL if the auxiliary data has been discarded.
** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
** SQLite will invoke the destructor function X with parameter P exactly
-** once, when the metadata is discarded.
-** SQLite is free to discard the metadata at any time, including: <ul>
+** once, when the auxiliary data is discarded.
+** SQLite is free to discard the auxiliary data at any time, including: <ul>
** <li> ^(when the corresponding function parameter changes)^, or
** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
** SQL statement)^, or
** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
** parameter)^, or
** <li> ^(during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs.)^ </ul>
+** allocation error occurs.)^
+** <li> ^(during the original sqlite3_set_auxdata() call if the function
+** is evaluated during query planning instead of during query execution,
+** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul>
**
-** Note the last bullet in particular. The destructor X in
+** Note the last two bullets in particular. The destructor X in
** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
** should be called near the end of the function implementation and the
** function implementation should not make any use of P after
-** sqlite3_set_auxdata() has been called.
-**
-** ^(In practice, metadata is preserved between function calls for
+** sqlite3_set_auxdata() has been called. Furthermore, a call to
+** sqlite3_get_auxdata() that occurs immediately after a corresponding call
+** to sqlite3_set_auxdata() might still return NULL if an out-of-memory
+** condition occurred during the sqlite3_set_auxdata() call or if the
+** function is being evaluated during query planning rather than during
+** query execution.
+**
+** ^(In practice, auxiliary data is preserved between function calls for
** function parameters that are compile-time constants, including literal
** values and [parameters] and expressions composed from the same.)^
**
@@ -6190,10 +6277,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
**
** These routines must be called from the same thread in which
** the SQL function is running.
+**
+** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()].
*/
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+/*
+** CAPI3REF: Database Connection Client Data
+** METHOD: sqlite3
+**
+** These functions are used to associate one or more named pointers
+** with a [database connection].
+** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P
+** to be attached to [database connection] D using name N. Subsequent
+** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P
+** or a NULL pointer if there were no prior calls to
+** sqlite3_set_clientdata() with the same values of D and N.
+** Names are compared using strcmp() and are thus case sensitive.
+**
+** If P and X are both non-NULL, then the destructor X is invoked with
+** argument P on the first of the following occurrences:
+** <ul>
+** <li> An out-of-memory error occurs during the call to
+** sqlite3_set_clientdata() which attempts to register pointer P.
+** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made
+** with the same D and N parameters.
+** <li> The database connection closes. SQLite does not make any guarantees
+** about the order in which destructors are called, only that all
+** destructors will be called exactly once at some point during the
+** database connection closing process.
+** </ul>
+**
+** SQLite does not do anything with client data other than invoke
+** destructors on the client data at the appropriate time. The intended
+** use for client data is to provide a mechanism for wrapper libraries
+** to store additional information about an SQLite database connection.
+**
+** There is no limit (other than available memory) on the number of different
+** client data pointers (with different names) that can be attached to a
+** single database connection. However, the implementation is optimized
+** for the case of having only one or two different client data names.
+** Applications and wrapper libraries are discouraged from using more than
+** one client data name each.
+**
+** There is no way to enumerate the client data pointers
+** associated with a database connection. The N parameter can be thought
+** of as a secret key such that only code that knows the secret key is able
+** to access the associated data.
+**
+** Security Warning: These interfaces should not be exposed in scripting
+** languages or in other circumstances where it might be possible for an
+** an attacker to invoke them. Any agent that can invoke these interfaces
+** can probably also take control of the process.
+**
+** Database connection client data is only available for SQLite
+** version 3.44.0 ([dateof:3.44.0]) and later.
+**
+** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()].
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*);
+SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*));
/*
** CAPI3REF: Constants Defining Special Destructor Behavior
@@ -6395,6 +6539,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
** higher order bits are discarded.
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
+**
+** Every [application-defined SQL function] that invokes this interface
+** should include the [SQLITE_RESULT_SUBTYPE] property in its
+** text encoding argument when the SQL function is
+** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE]
+** property is omitted from the function that invokes sqlite3_result_subtype(),
+** then in some cases the sqlite3_result_subtype() might fail to set
+** the result subtype.
+**
+** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any
+** SQL function that invokes the sqlite3_result_subtype() interface
+** and that does not have the SQLITE_RESULT_SUBTYPE property will raise
+** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1
+** by default.
*/
SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
@@ -6826,7 +6984,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema);
/*
-** CAPI3REF: Allowed return values from [sqlite3_txn_state()]
+** CAPI3REF: Allowed return values from sqlite3_txn_state()
** KEYWORDS: {transaction state}
**
** These constants define the current transaction state of a database file.
@@ -6958,7 +7116,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^Each call to the sqlite3_autovacuum_pages() interface overrides all
** previous invocations for that database connection. ^If the callback
** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer,
-** then the autovacuum steps callback is cancelled. The return value
+** then the autovacuum steps callback is canceled. The return value
** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might
** be some other error code if something goes wrong. The current
** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other
@@ -7477,6 +7635,10 @@ struct sqlite3_module {
/* The methods above are in versions 1 and 2 of the sqlite_module object.
** Those below are for version 3 and greater. */
int (*xShadowName)(const char*);
+ /* The methods above are in versions 1 through 3 of the sqlite_module object.
+ ** Those below are for version 4 and greater. */
+ int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema,
+ const char *zTabName, int mFlags, char **pzErr);
};
/*
@@ -7964,7 +8126,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
** code is returned and the transaction rolled back.
**
** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
+** open blob handle results in undefined behavior. ^Calling this routine
** with a null pointer (such as would be returned by a failed call to
** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
** is passed a valid open blob handle, the values returned by the
@@ -8191,9 +8353,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. In most cases the SQLite core only uses
+** sqlite3_mutex_try() as an optimization, so this is acceptable
+** behavior. The exceptions are unix builds that set the
+** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working
+** sqlite3_mutex_try() is required.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
** previously entered by the same thread. The behavior
@@ -8444,6 +8608,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_PRNG_SAVE 5
#define SQLITE_TESTCTRL_PRNG_RESTORE 6
#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
+#define SQLITE_TESTCTRL_FK_NO_ACTION 7
#define SQLITE_TESTCTRL_BITVEC_TEST 8
#define SQLITE_TESTCTRL_FAULT_INSTALL 9
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
@@ -8451,6 +8616,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ASSERT 12
#define SQLITE_TESTCTRL_ALWAYS 13
#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_JSON_SELFCHECK 14
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
@@ -8472,7 +8638,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_TRACEFLAGS 31
#define SQLITE_TESTCTRL_TUNE 32
#define SQLITE_TESTCTRL_LOGEST 33
-#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */
+#define SQLITE_TESTCTRL_USELONGDOUBLE 34
+#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@@ -9928,7 +10095,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
** <dd>Calls of the form
** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the
-** the [xConnect] or [xCreate] methods of a [virtual table] implmentation
+** the [xConnect] or [xCreate] methods of a [virtual table] implementation
** prohibits that virtual table from being used from within triggers and
** views.
** </dd>
@@ -10118,7 +10285,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*);
** communicated to the xBestIndex method as a
** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use
** this constraint, it must set the corresponding
-** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under
+** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under
** the usual mode of handling IN operators, SQLite generates [bytecode]
** that invokes the [xFilter|xFilter() method] once for each value
** on the right-hand side of the IN operator.)^ Thus the virtual table
@@ -10547,7 +10714,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** When the [sqlite3_blob_write()] API is used to update a blob column,
** the pre-update hook is invoked with SQLITE_DELETE. This is because the
** in this case the new values are not available. In this case, when a
-** callback made with op==SQLITE_DELETE is actuall a write using the
+** callback made with op==SQLITE_DELETE is actually a write using the
** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
** the index of the column being written. In other cases, where the
** pre-update hook is being invoked for some other reason, including a
@@ -10808,6 +10975,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
** of the database exists.
**
+** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set,
+** the returned buffer content will remain accessible and unchanged
+** until either the next write operation on the connection or when
+** the connection is closed, and applications must not modify the
+** buffer. If the bit had been clear, the returned buffer will not
+** be accessed by SQLite after the call.
+**
** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
** allocation error occurs.
@@ -10856,6 +11030,9 @@ SQLITE_API unsigned char *sqlite3_serialize(
** SQLite will try to increase the buffer size using sqlite3_realloc64()
** if writes on the database cause it to grow larger than M bytes.
**
+** Applications must not modify the buffer P or invalidate it before
+** the database connection D is closed.
+**
** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
** database is currently in a read transaction or is involved in a backup
** operation.
@@ -10864,6 +11041,13 @@ SQLITE_API unsigned char *sqlite3_serialize(
** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the
** function returns SQLITE_ERROR.
**
+** The deserialized database should not be in [WAL mode]. If the database
+** is in WAL mode, then any attempt to use the database file will result
+** in an [SQLITE_CANTOPEN] error. The application can set the
+** [file format version numbers] (bytes 18 and 19) of the input database P
+** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the
+** database file into rollback mode and work around this limitation.
+**
** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
** [sqlite3_free()] is invoked on argument P prior to returning.
@@ -11937,6 +12121,18 @@ SQLITE_API int sqlite3changeset_concat(
/*
+** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
+*/
+SQLITE_API int sqlite3changeset_upgrade(
+ sqlite3 *db,
+ const char *zDb,
+ int nIn, const void *pIn, /* Input changeset */
+ int *pnOut, void **ppOut /* OUT: Inverse of input */
+);
+
+
+
+/*
** CAPI3REF: Changegroup Handle
**
** A changegroup is an object used to combine two or more
@@ -11983,6 +12179,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add a Schema to a Changegroup
+** METHOD: sqlite3_changegroup_schema
+**
+** This method may be used to optionally enforce the rule that the changesets
+** added to the changegroup handle must match the schema of database zDb
+** ("main", "temp", or the name of an attached database). If
+** sqlite3changegroup_add() is called to add a changeset that is not compatible
+** with the configured schema, SQLITE_SCHEMA is returned and the changegroup
+** object is left in an undefined state.
+**
+** A changeset schema is considered compatible with the database schema in
+** the same way as for sqlite3changeset_apply(). Specifically, for each
+** table in the changeset, there exists a database table with:
+**
+** <ul>
+** <li> The name identified by the changeset, and
+** <li> at least as many columns as recorded in the changeset, and
+** <li> the primary key columns in the same position as recorded in
+** the changeset.
+** </ul>
+**
+** The output of the changegroup object always has the same schema as the
+** database nominated using this function. In cases where changesets passed
+** to sqlite3changegroup_add() have fewer columns than the corresponding table
+** in the database schema, these are filled in using the default column
+** values from the database schema. This makes it possible to combined
+** changesets that have different numbers of columns for a single table
+** within a changegroup, provided that they are otherwise compatible.
+*/
+SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb);
+
+/*
** CAPI3REF: Add A Changeset To A Changegroup
** METHOD: sqlite3_changegroup
**
@@ -12050,13 +12278,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
** If the new changeset contains changes to a table that is already present
** in the changegroup, then the number of columns and the position of the
** primary key columns for the table must be consistent. If this is not the
-** case, this function fails with SQLITE_SCHEMA. If the input changeset
-** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
-** returned. Or, if an out-of-memory condition occurs during processing, this
-** function returns SQLITE_NOMEM. In all cases, if an error occurs the state
-** of the final contents of the changegroup is undefined.
+** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup
+** object has been configured with a database schema using the
+** sqlite3changegroup_schema() API, then it is possible to combine changesets
+** with different numbers of columns for a single table, provided that
+** they are otherwise compatible.
**
-** If no error occurs, SQLITE_OK is returned.
+** If the input changeset appears to be corrupt and the corruption is
+** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition
+** occurs during processing, this function returns SQLITE_NOMEM.
+**
+** In all cases, if an error occurs the state of the final contents of the
+** changegroup is undefined. If no error occurs, SQLITE_OK is returned.
*/
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
@@ -12321,10 +12554,17 @@ SQLITE_API int sqlite3changeset_apply_v2(
** <li>an insert change if all fields of the conflicting row match
** the row being inserted.
** </ul>
+**
+** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd>
+** If this flag it set, then all foreign key constraints in the target
+** database behave as if they were declared with "ON UPDATE NO ACTION ON
+** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
+** or SET DEFAULT.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004
+#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@@ -12890,8 +13130,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@@ -12901,8 +13144,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@@ -12918,12 +13163,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@@ -12949,6 +13195,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
+**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@@ -13063,6 +13313,39 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+ are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
@@ -13100,6 +13383,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
@@ -13294,8 +13584,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
+** provide synonyms when tokenizing document text (method (3)) or query
+** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@@ -13343,7 +13633,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@@ -13352,7 +13642,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
- void **ppContext,
+ void **ppUserData,
fts5_tokenizer *pTokenizer
);
@@ -13360,7 +13650,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);
@@ -13471,7 +13761,7 @@ struct fts5_api {
** level of recursion for each term. A stack overflow can result
** if the number of terms is too large. In practice, most SQL
** never has more than 3 or 4 terms. Use a value of 0 to disable
-** any limit on the number of terms in a compount SELECT.
+** any limit on the number of terms in a compound SELECT.
*/
#ifndef SQLITE_MAX_COMPOUND_SELECT
# define SQLITE_MAX_COMPOUND_SELECT 500
@@ -13586,7 +13876,7 @@ struct fts5_api {
** max_page_count macro.
*/
#ifndef SQLITE_MAX_PAGE_COUNT
-# define SQLITE_MAX_PAGE_COUNT 1073741823
+# define SQLITE_MAX_PAGE_COUNT 0xfffffffe /* 4294967294 */
#endif
/*
@@ -13716,6 +14006,29 @@ struct fts5_api {
#endif
/*
+** Enable SQLITE_USE_SEH by default on MSVC builds. Only omit
+** SEH support if the -DSQLITE_OMIT_SEH option is given.
+*/
+#if defined(_MSC_VER) && !defined(SQLITE_OMIT_SEH)
+# define SQLITE_USE_SEH 1
+#else
+# undef SQLITE_USE_SEH
+#endif
+
+/*
+** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly
+** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0
+*/
+#if defined(SQLITE_DIRECT_OVERFLOW_READ) && SQLITE_DIRECT_OVERFLOW_READ+1==1
+ /* Disable if -DSQLITE_DIRECT_OVERFLOW_READ=0 */
+# undef SQLITE_DIRECT_OVERFLOW_READ
+#else
+ /* In all other cases, enable */
+# define SQLITE_DIRECT_OVERFLOW_READ 1
+#endif
+
+
+/*
** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
** 0 means mutexes are permanently disable and the library is never
** threadsafe. 1 means the library is serialized which is the highest
@@ -14574,8 +14887,31 @@ typedef INT16_TYPE LogEst;
** the end of buffer S. This macro returns true if P points to something
** contained within the buffer S.
*/
-#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+/*
+** P is one byte past the end of a large buffer. Return true if a span of bytes
+** between S..E crosses the end of that buffer. In other words, return true
+** if the sub-buffer S..E-1 overflows the buffer whose last byte is P-1.
+**
+** S is the start of the span. E is one byte past the end of end of span.
+**
+** P
+** |-----------------| FALSE
+** |-------|
+** S E
+**
+** P
+** |-----------------|
+** |-------| TRUE
+** S E
+**
+** P
+** |-----------------|
+** |-------| FALSE
+** S E
+*/
+#define SQLITE_OVERFLOW(P,S,E) (((uptr)(S)<(uptr)(P))&&((uptr)(E)>(uptr)(P)))
/*
** Macros to determine whether the machine is big or little endian,
@@ -14585,16 +14921,33 @@ typedef INT16_TYPE LogEst;
** using C-preprocessor macros. If that is unsuccessful, or if
** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined
** at run-time.
+**
+** If you are building SQLite on some obscure platform for which the
+** following ifdef magic does not work, you can always include either:
+**
+** -DSQLITE_BYTEORDER=1234
+**
+** or
+**
+** -DSQLITE_BYTEORDER=4321
+**
+** to cause the build to work for little-endian or big-endian processors,
+** respectively.
*/
-#ifndef SQLITE_BYTEORDER
-# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */
+# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+# define SQLITE_BYTEORDER 4321
+# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
+# define SQLITE_BYTEORDER 1234
+# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1
+# define SQLITE_BYTEORDER 4321
+# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
-# define SQLITE_BYTEORDER 1234
-# elif defined(sparc) || defined(__ppc__) || \
- defined(__ARMEB__) || defined(__AARCH64EB__)
-# define SQLITE_BYTEORDER 4321
+# define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__)
+# define SQLITE_BYTEORDER 4321
# else
# define SQLITE_BYTEORDER 0
# endif
@@ -14809,7 +15162,7 @@ struct BusyHandler {
/*
** Name of table that holds the database schema.
**
-** The PREFERRED names are used whereever possible. But LEGACY is also
+** The PREFERRED names are used wherever possible. But LEGACY is also
** used for backwards compatibility.
**
** 1. Queries can use either the PREFERRED or the LEGACY names
@@ -14918,11 +15271,13 @@ typedef struct Column Column;
typedef struct Cte Cte;
typedef struct CteUse CteUse;
typedef struct Db Db;
+typedef struct DbClientData DbClientData;
typedef struct DbFixer DbFixer;
typedef struct Schema Schema;
typedef struct Expr Expr;
typedef struct ExprList ExprList;
typedef struct FKey FKey;
+typedef struct FpDecode FpDecode;
typedef struct FuncDestructor FuncDestructor;
typedef struct FuncDef FuncDef;
typedef struct FuncDefHash FuncDefHash;
@@ -14941,6 +15296,7 @@ typedef struct Parse Parse;
typedef struct ParseCleanup ParseCleanup;
typedef struct PreUpdate PreUpdate;
typedef struct PrintfArguments PrintfArguments;
+typedef struct RCStr RCStr;
typedef struct RenameToken RenameToken;
typedef struct Returning Returning;
typedef struct RowSet RowSet;
@@ -15554,7 +15910,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*);
SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*);
SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*);
SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*);
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *);
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, u64*);
SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*);
SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *);
@@ -15578,6 +15934,10 @@ SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*);
# define enable_simulated_io_errors()
#endif
+#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL)
+SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager*);
+#endif
+
#endif /* SQLITE_PAGER_H */
/************** End of pager.h ***********************************************/
@@ -15907,9 +16267,7 @@ SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int flags);
SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*);
SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor*);
SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor*);
-#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor*);
-#endif
SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
@@ -16139,6 +16497,7 @@ typedef struct VdbeOpList VdbeOpList;
#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */
#define P4_INTARRAY (-14) /* P4 is a vector of 32-bit integers */
#define P4_FUNCCTX (-15) /* P4 is a pointer to an sqlite3_context object */
+#define P4_TABLEREF (-16) /* Like P4_TABLE, but reference counted */
/* Error message codes for OP_Halt */
#define P5_ConstraintNotNull 1
@@ -16354,19 +16713,22 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_VCreate 171
#define OP_VDestroy 172
#define OP_VOpen 173
-#define OP_VInitIn 174 /* synopsis: r[P2]=ValueList(P1,P3) */
-#define OP_VColumn 175 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 176
-#define OP_Pagecount 177
-#define OP_MaxPgcnt 178
-#define OP_ClrSubtype 179 /* synopsis: r[P1].subtype = 0 */
-#define OP_FilterAdd 180 /* synopsis: filter(P1) += key(P3@P4) */
-#define OP_Trace 181
-#define OP_CursorHint 182
-#define OP_ReleaseReg 183 /* synopsis: release r[P1@P2] mask P3 */
-#define OP_Noop 184
-#define OP_Explain 185
-#define OP_Abortable 186
+#define OP_VCheck 174
+#define OP_VInitIn 175 /* synopsis: r[P2]=ValueList(P1,P3) */
+#define OP_VColumn 176 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 177
+#define OP_Pagecount 178
+#define OP_MaxPgcnt 179
+#define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */
+#define OP_GetSubtype 181 /* synopsis: r[P2] = r[P1].subtype */
+#define OP_SetSubtype 182 /* synopsis: r[P2].subtype = r[P1] */
+#define OP_FilterAdd 183 /* synopsis: filter(P1) += key(P3@P4) */
+#define OP_Trace 184
+#define OP_CursorHint 185
+#define OP_ReleaseReg 186 /* synopsis: release r[P1@P2] mask P3 */
+#define OP_Noop 187
+#define OP_Explain 188
+#define OP_Abortable 189
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -16384,7 +16746,7 @@ typedef struct VdbeOpList VdbeOpList;
/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\
/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\
/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\
-/* 32 */ 0x41, 0x01, 0x01, 0x01, 0x41, 0x01, 0x41, 0x41,\
+/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\
/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\
/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\
@@ -16396,14 +16758,14 @@ typedef struct VdbeOpList VdbeOpList;
/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\
/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\
/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\
-/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50,\
+/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\
/* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\
/* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\
/* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\
/* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x50, 0x40,\
-/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\
-/* 184 */ 0x00, 0x00, 0x00,}
+/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\
+/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\
+/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}
/* The resolve3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode. The smaller the maximum
@@ -16578,7 +16940,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
** The VdbeCoverage macros are used to set a coverage testing point
** for VDBE branch instructions. The coverage testing points are line
** numbers in the sqlite3.c source file. VDBE branch coverage testing
-** only works with an amalagmation build. That's ok since a VDBE branch
+** only works with an amalgamation build. That's ok since a VDBE branch
** coverage build designed for testing the test suite only. No application
** should ever ship with VDBE branch coverage measuring turned on.
**
@@ -16596,7 +16958,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
** // NULL option is not possible
**
** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested
-** // in distingishing equal and not-equal.
+** // in distinguishing equal and not-equal.
**
** Every VDBE branch operation must be tagged with one of the macros above.
** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
@@ -16606,7 +16968,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
** During testing, the test application will invoke
** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback
** routine that is invoked as each bytecode branch is taken. The callback
-** contains the sqlite3.c source line number ov the VdbeCoverage macro and
+** contains the sqlite3.c source line number of the VdbeCoverage macro and
** flags to indicate whether or not the branch was taken. The test application
** is responsible for keeping track of this and reporting byte-code branches
** that are never taken.
@@ -16945,7 +17307,7 @@ SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
/*
** Default synchronous levels.
**
-** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ
+** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ
** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
**
** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS
@@ -16984,7 +17346,7 @@ struct Db {
** An instance of the following structure stores a database schema.
**
** Most Schema objects are associated with a Btree. The exception is
-** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing.
+** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
** In shared cache mode, a single Schema object can be shared by multiple
** Btrees that refer to the same underlying BtShared object.
**
@@ -17095,7 +17457,7 @@ struct Lookaside {
LookasideSlot *pInit; /* List of buffers not previously used */
LookasideSlot *pFree; /* List of available buffers */
#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
- LookasideSlot *pSmallInit; /* List of small buffers not prediously used */
+ LookasideSlot *pSmallInit; /* List of small buffers not previously used */
LookasideSlot *pSmallFree; /* List of available small buffers */
void *pMiddle; /* First byte past end of full-size buffers and
** the first byte of LOOKASIDE_SMALL buffers */
@@ -17112,7 +17474,7 @@ struct LookasideSlot {
#define EnableLookaside db->lookaside.bDisable--;\
db->lookaside.sz=db->lookaside.bDisable?0:db->lookaside.szTrue
-/* Size of the smaller allocations in two-size lookside */
+/* Size of the smaller allocations in two-size lookaside */
#ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE
# define LOOKASIDE_SMALL 0
#else
@@ -17312,6 +17674,7 @@ struct sqlite3 {
i64 nDeferredCons; /* Net deferred constraints this transaction. */
i64 nDeferredImmCons; /* Net deferred immediate constraints */
int *pnBytesFreed; /* If not NULL, increment this in DbFree() */
+ DbClientData *pDbData; /* sqlite3_set_clientdata() content */
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
/* The following variables are all protected by the STATIC_MAIN
** mutex, not by sqlite3.mutex. They are used by code in notify.c.
@@ -17394,6 +17757,7 @@ struct sqlite3 {
/* the count using a callback. */
#define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */
#define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */
+#define SQLITE_FkNoAction HI(0x00008) /* Treat all FK as NO ACTION */
/* Flags used only if debugging */
#ifdef SQLITE_DEBUG
@@ -17451,6 +17815,7 @@ struct sqlite3 {
#define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */
#define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */
#define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */
+#define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */
#define SQLITE_AllOpts 0xffffffff /* All optimizations */
/*
@@ -17533,6 +17898,7 @@ struct FuncDestructor {
** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd
** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG
** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
+** SQLITE_FUNC_BYTELEN == OPFLAG_BYTELENARG
** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API
** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!!
@@ -17540,7 +17906,7 @@ struct FuncDestructor {
**
** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the
** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is
-** used internally and if set means tha the function has side effects.
+** used internally and if set means that the function has side effects.
** SQLITE_INNOCUOUS is used by application code and means "not unsafe".
** See multiple instances of tag-20230109-1.
*/
@@ -17551,6 +17917,7 @@ struct FuncDestructor {
#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/
#define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */
#define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */
+#define SQLITE_FUNC_BYTELEN 0x00c0 /* Built-in octet_length() function */
#define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */
/* 0x0200 -- available for reuse */
#define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */
@@ -17559,14 +17926,15 @@ struct FuncDestructor {
#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
** single query - might change over time */
#define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */
-/* 0x8000 -- available for reuse */
+#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */
#define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */
#define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */
#define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */
-#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */
+/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */
#define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */
#define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */
#define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */
+/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */
#define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */
/* Identifier numbers for each in-line function */
@@ -17658,10 +18026,11 @@ struct FuncDestructor {
#define MFUNCTION(zName, nArg, xPtr, xFunc) \
{nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
xPtr, 0, xFunc, 0, 0, 0, #zName, {0} }
-#define JFUNCTION(zName, nArg, iArg, xFunc) \
- {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|\
- SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, bJsonB, iArg, xFunc) \
+ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\
+ SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\
+ ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \
+ SQLITE_INT_TO_PTR(iArg|((bJsonB)*JSON_BLOB)),0,xFunc,0, 0, 0, #zName, {0} }
#define INLINE_FUNC(zName, nArg, iArg, mFlags) \
{nArg, SQLITE_FUNC_BUILTIN|\
SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \
@@ -18130,7 +18499,7 @@ struct FKey {
** foreign key.
**
** The OE_Default value is a place holder that means to use whatever
-** conflict resolution algorthm is required from context.
+** conflict resolution algorithm is required from context.
**
** The following symbolic values are used to record which type
** of conflict resolution action to take.
@@ -18296,6 +18665,7 @@ struct Index {
unsigned isCovering:1; /* True if this is a covering index */
unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */
+ unsigned bLowQual:1; /* sqlite_stat1 says this is a low-quality index */
unsigned bNoQuery:1; /* Do not use this index to optimize queries */
unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */
unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */
@@ -18406,6 +18776,10 @@ struct AggInfo {
FuncDef *pFunc; /* The aggregate function implementation */
int iDistinct; /* Ephemeral table used to enforce DISTINCT */
int iDistAddr; /* Address of OP_OpenEphemeral */
+ int iOBTab; /* Ephemeral table to implement ORDER BY */
+ u8 bOBPayload; /* iOBTab has payload columns separate from key */
+ u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */
+ u8 bUseSubtype; /* Transfer subtype info through sorter */
} *aFunc;
int nFunc; /* Number of entries in aFunc[] */
u32 selId; /* Select to which this AggInfo belongs */
@@ -18544,7 +18918,7 @@ struct Expr {
** TK_REGISTER: register number
** TK_TRIGGER: 1 -> new, 0 -> old
** EP_Unlikely: 134217728 times likelihood
- ** TK_IN: ephemerial table holding RHS
+ ** TK_IN: ephemeral table holding RHS
** TK_SELECT_COLUMN: Number of columns on the LHS
** TK_SELECT: 1st register of result vector */
ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.
@@ -18590,7 +18964,7 @@ struct Expr {
#define EP_Reduced 0x004000 /* Expr struct EXPR_REDUCEDSIZE bytes only */
#define EP_Win 0x008000 /* Contains window functions */
#define EP_TokenOnly 0x010000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */
- /* 0x020000 // Available for reuse */
+#define EP_FullSize 0x020000 /* Expr structure must remain full sized */
#define EP_IfNullRow 0x040000 /* The TK_IF_NULL_ROW opcode */
#define EP_Unlikely 0x080000 /* unlikely() or likelihood() function */
#define EP_ConstFunc 0x100000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
@@ -18620,12 +18994,15 @@ struct Expr {
#define ExprClearProperty(E,P) (E)->flags&=~(P)
#define ExprAlwaysTrue(E) (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue)
#define ExprAlwaysFalse(E) (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse)
+#define ExprIsFullSize(E) (((E)->flags&(EP_Reduced|EP_TokenOnly))==0)
/* Macros used to ensure that the correct members of unions are accessed
** in Expr.
*/
#define ExprUseUToken(E) (((E)->flags&EP_IntValue)==0)
#define ExprUseUValue(E) (((E)->flags&EP_IntValue)!=0)
+#define ExprUseWOfst(E) (((E)->flags&(EP_InnerON|EP_OuterON))==0)
+#define ExprUseWJoin(E) (((E)->flags&(EP_InnerON|EP_OuterON))!=0)
#define ExprUseXList(E) (((E)->flags&EP_xIsSelect)==0)
#define ExprUseXSelect(E) (((E)->flags&EP_xIsSelect)!=0)
#define ExprUseYTab(E) (((E)->flags&(EP_WinFunc|EP_Subrtn))==0)
@@ -18735,6 +19112,7 @@ struct ExprList {
#define ENAME_NAME 0 /* The AS clause of a result set */
#define ENAME_SPAN 1 /* Complete text of the result set expression */
#define ENAME_TAB 2 /* "DB.TABLE.NAME" for the result set */
+#define ENAME_ROWID 3 /* "DB.TABLE._rowid_" for * expansion of rowid */
/*
** An instance of this structure can hold a simple list of identifiers,
@@ -18814,7 +19192,7 @@ struct SrcItem {
unsigned notCte :1; /* This item may not match a CTE */
unsigned isUsing :1; /* u3.pUsing is valid */
unsigned isOn :1; /* u3.pOn was once valid and non-NULL */
- unsigned isSynthUsing :1; /* u3.pUsing is synthensized from NATURAL */
+ unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */
unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */
} fg;
int iCursor; /* The VDBE cursor number used to access this table */
@@ -18935,6 +19313,7 @@ struct NameContext {
int nRef; /* Number of names resolved by this context */
int nNcErr; /* Number of errors encountered while resolving names */
int ncFlags; /* Zero or more NC_* flags defined below */
+ u32 nNestedSelect; /* Number of nested selects using this NC */
Select *pWinSelect; /* SELECT statement for any window functions */
};
@@ -19343,6 +19722,7 @@ struct Parse {
int *aLabel; /* Space to hold the labels */
ExprList *pConstExpr;/* Constant expressions */
IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */
+ IndexedExpr *pIdxPartExpr; /* Exprs constrained by index WHERE clauses */
Token constraintName;/* Name of the constraint currently being parsed */
yDbMask writeMask; /* Start a write transaction on these databases */
yDbMask cookieMask; /* Bitmask of schema verified databases */
@@ -19350,6 +19730,9 @@ struct Parse {
int regRoot; /* Register holding root page number for new objects */
int nMaxArg; /* Max args passed to user function by sub-program */
int nSelect; /* Number of SELECT stmts. Counter for Select.selId */
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */
+#endif
#ifndef SQLITE_OMIT_SHARED_CACHE
int nTableLock; /* Number of locks in aTableLock */
TableLock *aTableLock; /* Required table locks for shared-cache mode */
@@ -19363,12 +19746,9 @@ struct Parse {
int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */
Returning *pReturning; /* The RETURNING clause */
} u1;
- u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
u32 oldmask; /* Mask of old.* columns referenced */
u32 newmask; /* Mask of new.* columns referenced */
-#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
- u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */
-#endif
+ LogEst nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */
u8 bReturning; /* Coding a RETURNING trigger */
u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
@@ -19492,6 +19872,7 @@ struct AuthContext {
#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */
#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
+#define OPFLAG_BYTELENARG 0xc0 /* OP_Column only for octet_length() */
#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
#define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */
#define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */
@@ -19613,6 +19994,7 @@ struct Returning {
int iRetCur; /* Transient table holding RETURNING results */
int nRetCol; /* Number of in pReturnEL after expansion */
int iRetReg; /* Register array for holding a row of RETURNING */
+ char zName[40]; /* Name of trigger: "sqlite_returning_%p" */
};
/*
@@ -19634,6 +20016,28 @@ struct sqlite3_str {
#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0)
+/*
+** The following object is the header for an "RCStr" or "reference-counted
+** string". An RCStr is passed around and used like any other char*
+** that has been dynamically allocated. The important interface
+** differences:
+**
+** 1. RCStr strings are reference counted. They are deallocated
+** when the reference count reaches zero.
+**
+** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than
+** sqlite3_free()
+**
+** 3. Make a (read-only) copy of a read-only RCStr string using
+** sqlite3RCStrRef().
+**
+** "String" is in the name, but an RCStr object can also be used to hold
+** binary data.
+*/
+struct RCStr {
+ u64 nRCRef; /* Number of references */
+ /* Total structure size should be a multiple of 8 bytes for alignment */
+};
/*
** A pointer to this structure is used to communicate information
@@ -19660,7 +20064,7 @@ typedef struct {
/* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled
** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning
** parameters are for temporary use during development, to help find
-** optimial values for parameters in the query planner. The should not
+** optimal values for parameters in the query planner. The should not
** be used on trunk check-ins. They are a temporary mechanism available
** for transient development builds only.
**
@@ -19686,6 +20090,10 @@ struct Sqlite3Config {
u8 bUseCis; /* Use covering indices for full-scans */
u8 bSmallMalloc; /* Avoid large memory allocations if true */
u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */
+ u8 bUseLongDouble; /* Make use of long double */
+#ifdef SQLITE_DEBUG
+ u8 bJsonSelfcheck; /* Double-check JSON parsing */
+#endif
int mxStrlen; /* Maximum string length */
int neverCorrupt; /* Database is always well-formed */
int szLookaside; /* Default lookaside buffer size */
@@ -19772,6 +20180,7 @@ struct Walker {
void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */
int walkerDepth; /* Number of subqueries */
u16 eCode; /* A small processing code */
+ u16 mWFlags; /* Use-dependent flags */
union { /* Extra data for callback */
NameContext *pNC; /* Naming context */
int n; /* A counter */
@@ -19811,6 +20220,7 @@ struct DbFixer {
/* Forward declarations */
SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*);
+SQLITE_PRIVATE int sqlite3WalkExprNN(Walker*, Expr*);
SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*);
SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*);
SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);
@@ -19891,6 +20301,16 @@ struct CteUse {
};
+/* Client data associated with sqlite3_set_clientdata() and
+** sqlite3_get_clientdata().
+*/
+struct DbClientData {
+ DbClientData *pNext; /* Next in a linked list */
+ void *pData; /* The data */
+ void (*xDestructor)(void*); /* Destructor. Might be NULL */
+ char zName[1]; /* Name of this client data. MUST BE LAST */
+};
+
#ifdef SQLITE_DEBUG
/*
** An instance of the TreeView object is used for printing the content of
@@ -20192,6 +20612,20 @@ struct PrintfArguments {
sqlite3_value **apArg; /* The argument values */
};
+/*
+** An instance of this object receives the decoding of a floating point
+** value into an approximate decimal representation.
+*/
+struct FpDecode {
+ char sign; /* '+' or '-' */
+ char isSpecial; /* 1: Infinity 2: NaN */
+ int n; /* Significant digits in the decode */
+ int iDP; /* Location of the decimal point */
+ char *z; /* Start of significant digits */
+ char zBuf[24]; /* Storage for significant digits */
+};
+
+SQLITE_PRIVATE void sqlite3FpDecode(FpDecode*,double,int,int);
SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...);
SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list);
#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
@@ -20281,9 +20715,12 @@ SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*);
SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*);
SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, const Token*, int);
+SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(Parse*,Expr*,ExprList*);
+SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*);
SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*);
SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32);
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
+SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*);
SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*);
SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
@@ -20293,6 +20730,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int);
SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int);
SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*);
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*);
+SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index*);
SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
@@ -20383,6 +20821,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask);
SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int);
SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int);
SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*);
+SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3*, void*);
SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*);
#ifndef SQLITE_OMIT_AUTOINCREMENT
SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse);
@@ -20419,6 +20858,7 @@ SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*);
SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,
Expr*,ExprList*,u32,Expr*);
SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*);
+SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*);
SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*);
SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int);
@@ -20482,7 +20922,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(const Parse*,const Expr*,const Expr*, int)
SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*,Expr*,int);
SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList*,const ExprList*, int);
SQLITE_PRIVATE int sqlite3ExprImpliesExpr(const Parse*,const Expr*,const Expr*, int);
-SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int);
+SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int,int);
SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*);
SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*);
SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);
@@ -20517,6 +20957,7 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*);
SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*);
SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char);
SQLITE_PRIVATE int sqlite3IsRowid(const char*);
+SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab);
SQLITE_PRIVATE void sqlite3GenerateRowDelete(
Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int);
SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);
@@ -20631,6 +21072,7 @@ SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*);
SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*);
SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*);
SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*);
+
SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64);
SQLITE_PRIVATE i64 sqlite3RealToI64(double);
SQLITE_PRIVATE int sqlite3Int64ToText(i64,char*);
@@ -20643,6 +21085,7 @@ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar);
#endif
SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte);
SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**);
+SQLITE_PRIVATE int sqlite3Utf8ReadLimited(const u8*, int, u32*);
SQLITE_PRIVATE LogEst sqlite3LogEst(u64);
SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst);
SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double);
@@ -20735,6 +21178,7 @@ SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*);
SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8);
SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8);
+SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value*, void(*)(void*));
SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8);
SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8,
void(*)(void*));
@@ -20786,7 +21230,8 @@ SQLITE_PRIVATE int sqlite3MatchEName(
const struct ExprList_item*,
const char*,
const char*,
- const char*
+ const char*,
+ int*
);
SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr*);
SQLITE_PRIVATE u8 sqlite3StrIHash(const char*);
@@ -20842,6 +21287,11 @@ SQLITE_PRIVATE void sqlite3OomClear(sqlite3*);
SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int);
SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *);
+SQLITE_PRIVATE char *sqlite3RCStrRef(char*);
+SQLITE_PRIVATE void sqlite3RCStrUnref(void*);
+SQLITE_PRIVATE char *sqlite3RCStrNew(u64);
+SQLITE_PRIVATE char *sqlite3RCStrResize(char*,u64);
+
SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int);
SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, i64);
SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*);
@@ -20982,6 +21432,7 @@ SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8);
SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*);
SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*);
SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*);
+SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3*,void*);
SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8);
#else
# define sqlite3CteNew(P,T,E,S) ((void*)0)
@@ -21093,6 +21544,7 @@ SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int);
#define sqlite3SelectExprHeight(x) 0
#define sqlite3ExprCheckHeight(x,y)
#endif
+SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr*,int);
SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*);
SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);
@@ -21378,9 +21830,6 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
"4_BYTE_ALIGNED_MALLOC",
#endif
-#ifdef SQLITE_64BIT_STATS
- "64BIT_STATS",
-#endif
#ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN
# if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1
"ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN),
@@ -21676,6 +22125,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS
"EXPLAIN_ESTIMATED_ROWS",
#endif
+#ifdef SQLITE_EXTRA_AUTOEXT
+ "EXTRA_AUTOEXT=" CTIMEOPT_VAL(SQLITE_EXTRA_AUTOEXT),
+#endif
#ifdef SQLITE_EXTRA_IFNULLROW
"EXTRA_IFNULLROW",
#endif
@@ -21717,6 +22169,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX
"INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX),
#endif
+#ifdef SQLITE_LEGACY_JSON_VALID
+ "LEGACY_JSON_VALID",
+#endif
#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS
"LIKE_DOESNT_MATCH_BLOBS",
#endif
@@ -21954,6 +22409,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
"OMIT_SCHEMA_VERSION_PRAGMAS",
#endif
+#ifdef SQLITE_OMIT_SEH
+ "OMIT_SEH",
+#endif
#ifdef SQLITE_OMIT_SHARED_CACHE
"OMIT_SHARED_CACHE",
#endif
@@ -22351,6 +22809,10 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
0, /* bSmallMalloc */
1, /* bExtraSchemaChecks */
+ sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */
+#ifdef SQLITE_DEBUG
+ 0, /* bJsonSelfcheck */
+#endif
0x7ffffffe, /* mxStrlen */
0, /* neverCorrupt */
SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */
@@ -22580,6 +23042,9 @@ typedef struct VdbeSorter VdbeSorter;
/* Elements of the linked list at Vdbe.pAuxData */
typedef struct AuxData AuxData;
+/* A cache of large TEXT or BLOB values in a VdbeCursor */
+typedef struct VdbeTxtBlbCache VdbeTxtBlbCache;
+
/* Types of VDBE cursors */
#define CURTYPE_BTREE 0
#define CURTYPE_SORTER 1
@@ -22611,6 +23076,7 @@ struct VdbeCursor {
Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
Bool noReuse:1; /* OpenEphemeral may not reuse this cursor */
+ Bool colCache:1; /* pCache pointer is initialized and non-NULL */
u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */
union { /* pBtx for isEphermeral. pAltMap otherwise */
Btree *pBtx; /* Separate file holding temporary table */
@@ -22651,6 +23117,7 @@ struct VdbeCursor {
#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
u64 maskUsed; /* Mask of columns used by this cursor */
#endif
+ VdbeTxtBlbCache *pCache; /* Cache of large TEXT or BLOB values */
/* 2*nField extra array elements allocated for aType[], beyond the one
** static element declared in the structure. nField total array slots for
@@ -22663,13 +23130,26 @@ struct VdbeCursor {
#define IsNullCursor(P) \
((P)->eCurType==CURTYPE_PSEUDO && (P)->nullRow && (P)->seekResult==0)
-
/*
** A value for VdbeCursor.cacheStatus that means the cache is always invalid.
*/
#define CACHE_STALE 0
/*
+** Large TEXT or BLOB values can be slow to load, so we want to avoid
+** loading them more than once. For that reason, large TEXT and BLOB values
+** can be stored in a cache defined by this object, and attached to the
+** VdbeCursor using the pCache field.
+*/
+struct VdbeTxtBlbCache {
+ char *pCValue; /* A RCStr buffer to hold the value */
+ i64 iOffset; /* File offset of the row being cached */
+ int iCol; /* Column for which the cache is valid */
+ u32 cacheStatus; /* Vdbe.cacheCtr value */
+ u32 colCacheCtr; /* Column cache counter */
+};
+
+/*
** When a sub-program is executed (OP_Program), a structure of this type
** is allocated to store the current value of the program counter, as
** well as the current memory cell array and various other frame specific
@@ -22989,16 +23469,18 @@ struct Vdbe {
u32 nWrite; /* Number of write operations that have occurred */
#endif
u16 nResColumn; /* Number of columns in one row of the result set */
+ u16 nResAlloc; /* Column slots allocated to aColName[] */
u8 errorAction; /* Recovery action to do in case of an error */
u8 minWriteFileFormat; /* Minimum file format for writable database files */
u8 prepFlags; /* SQLITE_PREPARE_* flags */
u8 eVdbeState; /* On of the VDBE_*_STATE values */
bft expired:2; /* 1: recompile VM immediately 2: when convenient */
- bft explain:2; /* True if EXPLAIN present on SQL command */
+ bft explain:2; /* 0: normal, 1: EXPLAIN, 2: EXPLAIN QUERY PLAN */
bft changeCntOn:1; /* True to update the change-counter */
bft usesStmtJournal:1; /* True if uses a statement journal */
bft readOnly:1; /* True for statements that do not write */
bft bIsReader:1; /* True for statements that read */
+ bft haveEqpOps:1; /* Bytecode supports EXPLAIN QUERY PLAN */
yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */
yDbMask lockMask; /* Subset of btreeMask that requires a lock */
u32 aCounter[9]; /* Counters used by sqlite3_stmt_status() */
@@ -23045,7 +23527,7 @@ struct PreUpdate {
i64 iKey1; /* First key value passed to hook */
i64 iKey2; /* Second key value passed to hook */
Mem *aNew; /* Array of new.* values */
- Table *pTab; /* Schema object being upated */
+ Table *pTab; /* Schema object being updated */
Index *pPk; /* PK index if pTab is WITHOUT ROWID */
};
@@ -23135,6 +23617,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetZeroBlob(Mem*,int);
SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*);
#endif
SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*);
+SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8);
SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double);
@@ -23582,7 +24065,7 @@ SQLITE_API int sqlite3_db_status(
case SQLITE_DBSTATUS_CACHE_MISS:
case SQLITE_DBSTATUS_CACHE_WRITE:{
int i;
- int nRet = 0;
+ u64 nRet = 0;
assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
@@ -23595,7 +24078,7 @@ SQLITE_API int sqlite3_db_status(
*pHighwater = 0; /* IMP: R-42420-56072 */
/* IMP: R-54100-20147 */
/* IMP: R-29431-39229 */
- *pCurrent = nRet;
+ *pCurrent = (int)nRet & 0x7fffffff;
break;
}
@@ -23731,8 +24214,8 @@ struct DateTime {
*/
static int getDigits(const char *zDate, const char *zFormat, ...){
/* The aMx[] array translates the 3rd character of each format
- ** spec into a max size: a b c d e f */
- static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 };
+ ** spec into a max size: a b c d e f */
+ static const u16 aMx[] = { 12, 14, 24, 31, 59, 14712 };
va_list ap;
int cnt = 0;
char nextC;
@@ -24073,17 +24556,14 @@ static void computeYMD(DateTime *p){
** Compute the Hour, Minute, and Seconds from the julian day number.
*/
static void computeHMS(DateTime *p){
- int s;
+ int day_ms, day_min; /* milliseconds, minutes into the day */
if( p->validHMS ) return;
computeJD(p);
- s = (int)((p->iJD + 43200000) % 86400000);
- p->s = s/1000.0;
- s = (int)p->s;
- p->s -= s;
- p->h = s/3600;
- s -= p->h*3600;
- p->m = s/60;
- p->s += s - p->m*60;
+ day_ms = (int)((p->iJD + 43200000) % 86400000);
+ p->s = (day_ms % 60000)/1000.0;
+ day_min = day_ms/60000;
+ p->m = day_min % 60;
+ p->h = day_min / 60;
p->rawS = 0;
p->validHMS = 1;
}
@@ -24263,6 +24743,25 @@ static const struct {
};
/*
+** If the DateTime p is raw number, try to figure out if it is
+** a julian day number of a unix timestamp. Set the p value
+** appropriately.
+*/
+static void autoAdjustDate(DateTime *p){
+ if( !p->rawS || p->validJD ){
+ p->rawS = 0;
+ }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */
+ && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */
+ ){
+ double r = p->s*1000.0 + 210866760000000.0;
+ clearYMD_HMS_TZ(p);
+ p->iJD = (sqlite3_int64)(r + 0.5);
+ p->validJD = 1;
+ p->rawS = 0;
+ }
+}
+
+/*
** Process a modifier to a date-time stamp. The modifiers are
** as follows:
**
@@ -24305,19 +24804,8 @@ static int parseModifier(
*/
if( sqlite3_stricmp(z, "auto")==0 ){
if( idx>1 ) return 1; /* IMP: R-33611-57934 */
- if( !p->rawS || p->validJD ){
- rc = 0;
- p->rawS = 0;
- }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */
- && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */
- ){
- r = p->s*1000.0 + 210866760000000.0;
- clearYMD_HMS_TZ(p);
- p->iJD = (sqlite3_int64)(r + 0.5);
- p->validJD = 1;
- p->rawS = 0;
- rc = 0;
- }
+ autoAdjustDate(p);
+ rc = 0;
}
break;
}
@@ -24483,18 +24971,73 @@ static int parseModifier(
case '9': {
double rRounder;
int i;
- for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){}
+ int Y,M,D,h,m,x;
+ const char *z2 = z;
+ char z0 = z[0];
+ for(n=1; z[n]; n++){
+ if( z[n]==':' ) break;
+ if( sqlite3Isspace(z[n]) ) break;
+ if( z[n]=='-' ){
+ if( n==5 && getDigits(&z[1], "40f", &Y)==1 ) break;
+ if( n==6 && getDigits(&z[1], "50f", &Y)==1 ) break;
+ }
+ }
if( sqlite3AtoF(z, &r, n, SQLITE_UTF8)<=0 ){
- rc = 1;
+ assert( rc==1 );
break;
}
- if( z[n]==':' ){
+ if( z[n]=='-' ){
+ /* A modifier of the form (+|-)YYYY-MM-DD adds or subtracts the
+ ** specified number of years, months, and days. MM is limited to
+ ** the range 0-11 and DD is limited to 0-30.
+ */
+ if( z0!='+' && z0!='-' ) break; /* Must start with +/- */
+ if( n==5 ){
+ if( getDigits(&z[1], "40f-20a-20d", &Y, &M, &D)!=3 ) break;
+ }else{
+ assert( n==6 );
+ if( getDigits(&z[1], "50f-20a-20d", &Y, &M, &D)!=3 ) break;
+ z++;
+ }
+ if( M>=12 ) break; /* M range 0..11 */
+ if( D>=31 ) break; /* D range 0..30 */
+ computeYMD_HMS(p);
+ p->validJD = 0;
+ if( z0=='-' ){
+ p->Y -= Y;
+ p->M -= M;
+ D = -D;
+ }else{
+ p->Y += Y;
+ p->M += M;
+ }
+ x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
+ p->Y += x;
+ p->M -= x*12;
+ computeJD(p);
+ p->validHMS = 0;
+ p->validYMD = 0;
+ p->iJD += (i64)D*86400000;
+ if( z[11]==0 ){
+ rc = 0;
+ break;
+ }
+ if( sqlite3Isspace(z[11])
+ && getDigits(&z[12], "20c:20e", &h, &m)==2
+ ){
+ z2 = &z[12];
+ n = 2;
+ }else{
+ break;
+ }
+ }
+ if( z2[n]==':' ){
/* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the
** specified number of hours, minutes, seconds, and fractional seconds
** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be
** omitted.
*/
- const char *z2 = z;
+
DateTime tx;
sqlite3_int64 day;
if( !sqlite3Isdigit(*z2) ) z2++;
@@ -24504,7 +25047,7 @@ static int parseModifier(
tx.iJD -= 43200000;
day = tx.iJD/86400000;
tx.iJD -= day*86400000;
- if( z[0]=='-' ) tx.iJD = -tx.iJD;
+ if( z0=='-' ) tx.iJD = -tx.iJD;
computeJD(p);
clearYMD_HMS_TZ(p);
p->iJD += tx.iJD;
@@ -24520,7 +25063,7 @@ static int parseModifier(
if( n>10 || n<3 ) break;
if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--;
computeJD(p);
- rc = 1;
+ assert( rc==1 );
rRounder = r<0 ? -0.5 : +0.5;
for(i=0; i<ArraySize(aXformType); i++){
if( aXformType[i].nName==n
@@ -24529,7 +25072,6 @@ static int parseModifier(
){
switch( i ){
case 4: { /* Special processing to add months */
- int x;
assert( strcmp(aXformType[i].zName,"month")==0 );
computeYMD_HMS(p);
p->M += (int)r;
@@ -24605,6 +25147,12 @@ static int isDate(
}
computeJD(p);
if( p->isError || !validJulianDay(p->iJD) ) return 1;
+ if( argc==1 && p->validYMD && p->D>28 ){
+ /* Make sure a YYYY-MM-DD is normalized.
+ ** Example: 2023-02-31 -> 2023-03-03 */
+ assert( p->validJD );
+ p->validYMD = 0;
+ }
return 0;
}
@@ -24688,7 +25236,7 @@ static void datetimeFunc(
zBuf[16] = '0' + (x.m)%10;
zBuf[17] = ':';
if( x.useSubsec ){
- s = (int)1000.0*x.s;
+ s = (int)(1000.0*x.s + 0.5);
zBuf[18] = '0' + (s/10000)%10;
zBuf[19] = '0' + (s/1000)%10;
zBuf[20] = '.';
@@ -24735,7 +25283,7 @@ static void timeFunc(
zBuf[4] = '0' + (x.m)%10;
zBuf[5] = ':';
if( x.useSubsec ){
- s = (int)1000.0*x.s;
+ s = (int)(1000.0*x.s + 0.5);
zBuf[6] = '0' + (s/10000)%10;
zBuf[7] = '0' + (s/1000)%10;
zBuf[8] = '.';
@@ -24806,7 +25354,7 @@ static void dateFunc(
** %M minute 00-59
** %s seconds since 1970-01-01
** %S seconds 00-59
-** %w day of week 0-6 sunday==0
+** %w day of week 0-6 Sunday==0
** %W week of year 00-53
** %Y year 0000-9999
** %% %
@@ -24832,13 +25380,16 @@ static void strftimeFunc(
computeJD(&x);
computeYMD_HMS(&x);
for(i=j=0; zFmt[i]; i++){
+ char cf;
if( zFmt[i]!='%' ) continue;
if( j<i ) sqlite3_str_append(&sRes, zFmt+j, (int)(i-j));
i++;
j = i + 1;
- switch( zFmt[i] ){
- case 'd': {
- sqlite3_str_appendf(&sRes, "%02d", x.D);
+ cf = zFmt[i];
+ switch( cf ){
+ case 'd': /* Fall thru */
+ case 'e': {
+ sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D);
break;
}
case 'f': {
@@ -24847,8 +25398,21 @@ static void strftimeFunc(
sqlite3_str_appendf(&sRes, "%06.3f", s);
break;
}
- case 'H': {
- sqlite3_str_appendf(&sRes, "%02d", x.h);
+ case 'F': {
+ sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D);
+ break;
+ }
+ case 'H':
+ case 'k': {
+ sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h);
+ break;
+ }
+ case 'I': /* Fall thru */
+ case 'l': {
+ int h = x.h;
+ if( h>12 ) h -= 12;
+ if( h==0 ) h = 12;
+ sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h);
break;
}
case 'W': /* Fall thru */
@@ -24860,7 +25424,7 @@ static void strftimeFunc(
y.D = 1;
computeJD(&y);
nDay = (int)((x.iJD-y.iJD+43200000)/86400000);
- if( zFmt[i]=='W' ){
+ if( cf=='W' ){
int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */
wd = (int)(((x.iJD+43200000)/86400000)%7);
sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7);
@@ -24881,6 +25445,19 @@ static void strftimeFunc(
sqlite3_str_appendf(&sRes,"%02d",x.m);
break;
}
+ case 'p': /* Fall thru */
+ case 'P': {
+ if( x.h>=12 ){
+ sqlite3_str_append(&sRes, cf=='p' ? "PM" : "pm", 2);
+ }else{
+ sqlite3_str_append(&sRes, cf=='p' ? "AM" : "am", 2);
+ }
+ break;
+ }
+ case 'R': {
+ sqlite3_str_appendf(&sRes, "%02d:%02d", x.h, x.m);
+ break;
+ }
case 's': {
if( x.useSubsec ){
sqlite3_str_appendf(&sRes,"%.3f",
@@ -24895,9 +25472,15 @@ static void strftimeFunc(
sqlite3_str_appendf(&sRes,"%02d",(int)x.s);
break;
}
+ case 'T': {
+ sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s);
+ break;
+ }
+ case 'u': /* Fall thru */
case 'w': {
- sqlite3_str_appendchar(&sRes, 1,
- (char)(((x.iJD+129600000)/86400000) % 7) + '0');
+ char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0';
+ if( c=='0' && cf=='u' ) c = '7';
+ sqlite3_str_appendchar(&sRes, 1, c);
break;
}
case 'Y': {
@@ -24947,6 +25530,117 @@ static void cdateFunc(
}
/*
+** timediff(DATE1, DATE2)
+**
+** Return the amount of time that must be added to DATE2 in order to
+** convert it into DATE2. The time difference format is:
+**
+** +YYYY-MM-DD HH:MM:SS.SSS
+**
+** The initial "+" becomes "-" if DATE1 occurs before DATE2. For
+** date/time values A and B, the following invariant should hold:
+**
+** datetime(A) == (datetime(B, timediff(A,B))
+**
+** Both DATE arguments must be either a julian day number, or an
+** ISO-8601 string. The unix timestamps are not supported by this
+** routine.
+*/
+static void timediffFunc(
+ sqlite3_context *context,
+ int NotUsed1,
+ sqlite3_value **argv
+){
+ char sign;
+ int Y, M;
+ DateTime d1, d2;
+ sqlite3_str sRes;
+ UNUSED_PARAMETER(NotUsed1);
+ if( isDate(context, 1, &argv[0], &d1) ) return;
+ if( isDate(context, 1, &argv[1], &d2) ) return;
+ computeYMD_HMS(&d1);
+ computeYMD_HMS(&d2);
+ if( d1.iJD>=d2.iJD ){
+ sign = '+';
+ Y = d1.Y - d2.Y;
+ if( Y ){
+ d2.Y = d1.Y;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ M = d1.M - d2.M;
+ if( M<0 ){
+ Y--;
+ M += 12;
+ }
+ if( M!=0 ){
+ d2.M = d1.M;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ while( d1.iJD<d2.iJD ){
+ M--;
+ if( M<0 ){
+ M = 11;
+ Y--;
+ }
+ d2.M--;
+ if( d2.M<1 ){
+ d2.M = 12;
+ d2.Y--;
+ }
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ d1.iJD -= d2.iJD;
+ d1.iJD += (u64)1486995408 * (u64)100000;
+ }else /* d1<d2 */{
+ sign = '-';
+ Y = d2.Y - d1.Y;
+ if( Y ){
+ d2.Y = d1.Y;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ M = d2.M - d1.M;
+ if( M<0 ){
+ Y--;
+ M += 12;
+ }
+ if( M!=0 ){
+ d2.M = d1.M;
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ while( d1.iJD>d2.iJD ){
+ M--;
+ if( M<0 ){
+ M = 11;
+ Y--;
+ }
+ d2.M++;
+ if( d2.M>12 ){
+ d2.M = 1;
+ d2.Y++;
+ }
+ d2.validJD = 0;
+ computeJD(&d2);
+ }
+ d1.iJD = d2.iJD - d1.iJD;
+ d1.iJD += (u64)1486995408 * (u64)100000;
+ }
+ d1.validYMD = 0;
+ d1.validHMS = 0;
+ d1.validTZ = 0;
+ computeYMD_HMS(&d1);
+ sqlite3StrAccumInit(&sRes, 0, 0, 0, 100);
+ sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f",
+ sign, Y, M, d1.D-1, d1.h, d1.m, d1.s);
+ sqlite3ResultStrAccum(context, &sRes);
+}
+
+
+/*
** current_timestamp()
**
** This function returns the same value as datetime('now').
@@ -25020,6 +25714,7 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
PURE_DATE(time, -1, 0, 0, timeFunc ),
PURE_DATE(datetime, -1, 0, 0, datetimeFunc ),
PURE_DATE(strftime, -1, 0, 0, strftimeFunc ),
+ PURE_DATE(timediff, 2, 0, 0, timediffFunc ),
DFUNCTION(current_time, 0, 0, 0, ctimeFunc ),
DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
DFUNCTION(current_date, 0, 0, 0, cdateFunc ),
@@ -25173,7 +25868,7 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){
/* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite
** is using a regular VFS, it is called after the corresponding
** transaction has been committed. Injecting a fault at this point
- ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM
+ ** confuses the test scripts - the COMMIT command returns SQLITE_NOMEM
** but the transaction is committed anyway.
**
** The core must call OsFileControl() though, not OsFileControlHint(),
@@ -25794,7 +26489,7 @@ static void *sqlite3MemMalloc(int nByte){
** or sqlite3MemRealloc().
**
** For this low-level routine, we already know that pPrior!=0 since
-** cases where pPrior==0 will have been intecepted and dealt with
+** cases where pPrior==0 will have been intercepted and dealt with
** by higher-level routines.
*/
static void sqlite3MemFree(void *pPrior){
@@ -25882,7 +26577,7 @@ static int sqlite3MemInit(void *NotUsed){
return SQLITE_OK;
}
len = sizeof(cpuCount);
- /* One usually wants to use hw.acctivecpu for MT decisions, but not here */
+ /* One usually wants to use hw.activecpu for MT decisions, but not here */
sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0);
if( cpuCount>1 ){
/* defer MT decisions to system malloc */
@@ -27874,7 +28569,7 @@ static void checkMutexFree(sqlite3_mutex *p){
assert( SQLITE_MUTEX_FAST<2 );
assert( SQLITE_MUTEX_WARNONCONTENTION<2 );
-#if SQLITE_ENABLE_API_ARMOR
+#ifdef SQLITE_ENABLE_API_ARMOR
if( ((CheckMutex*)p)->iType<2 )
#endif
{
@@ -28349,7 +29044,7 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
/*
** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields
-** are necessary under two condidtions: (1) Debug builds and (2) using
+** are necessary under two conditions: (1) Debug builds and (2) using
** home-grown mutexes. Encapsulate these conditions into a single #define.
*/
#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX)
@@ -28546,7 +29241,7 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){
*/
static void pthreadMutexFree(sqlite3_mutex *p){
assert( p->nRef==0 );
-#if SQLITE_ENABLE_API_ARMOR
+#ifdef SQLITE_ENABLE_API_ARMOR
if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE )
#endif
{
@@ -28850,7 +29545,7 @@ struct sqlite3_mutex {
CRITICAL_SECTION mutex; /* Mutex controlling the lock */
int id; /* Mutex type */
#ifdef SQLITE_DEBUG
- volatile int nRef; /* Number of enterances */
+ volatile int nRef; /* Number of entrances */
volatile DWORD owner; /* Thread holding this mutex */
volatile LONG trace; /* True to trace changes */
#endif
@@ -28899,7 +29594,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
SQLITE_MEMORY_BARRIER;
#elif defined(__GNUC__)
__sync_synchronize();
-#elif MSVC_VERSION>=1300
+#elif MSVC_VERSION>=1400
_ReadWriteBarrier();
#elif defined(MemoryBarrier)
MemoryBarrier();
@@ -30110,7 +30805,7 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){
if( db->mallocFailed || rc ){
return apiHandleError(db, rc);
}
- return rc & db->errMask;
+ return 0;
}
/************** End of malloc.c **********************************************/
@@ -30222,57 +30917,6 @@ static const et_info fmtinfo[] = {
** %!S Like %S but prefer the zName over the zAlias
*/
-/* Floating point constants used for rounding */
-static const double arRound[] = {
- 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05,
- 5.0e-06, 5.0e-07, 5.0e-08, 5.0e-09, 5.0e-10,
-};
-
-/*
-** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point
-** conversions will work.
-*/
-#ifndef SQLITE_OMIT_FLOATING_POINT
-/*
-** "*val" is a double such that 0.1 <= *val < 10.0
-** Return the ascii code for the leading digit of *val, then
-** multiply "*val" by 10.0 to renormalize.
-**
-** Example:
-** input: *val = 3.14159
-** output: *val = 1.4159 function return = '3'
-**
-** The counter *cnt is incremented each time. After counter exceeds
-** 16 (the number of significant digits in a 64-bit float) '0' is
-** always returned.
-*/
-static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){
- int digit;
- LONGDOUBLE_TYPE d;
- if( (*cnt)<=0 ) return '0';
- (*cnt)--;
- digit = (int)*val;
- d = digit;
- digit += '0';
- *val = (*val - d)*10.0;
- return (char)digit;
-}
-#endif /* SQLITE_OMIT_FLOATING_POINT */
-
-#ifndef SQLITE_OMIT_FLOATING_POINT
-/*
-** "*val" is a u64. *msd is a divisor used to extract the
-** most significant digit of *val. Extract that most significant
-** digit and return it.
-*/
-static char et_getdigit_int(u64 *val, u64 *msd){
- u64 x = (*val)/(*msd);
- *val -= x*(*msd);
- if( *msd>=10 ) *msd /= 10;
- return '0' + (char)(x & 15);
-}
-#endif /* SQLITE_OMIT_FLOATING_POINT */
-
/*
** Set the StrAccum object to an error mode.
*/
@@ -30364,20 +31008,15 @@ SQLITE_API void sqlite3_str_vappendf(
u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */
char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
sqlite_uint64 longvalue; /* Value for integer types */
- LONGDOUBLE_TYPE realvalue; /* Value for real types */
- sqlite_uint64 msd; /* Divisor to get most-significant-digit
- ** of longvalue */
+ double realvalue; /* Value for real types */
const et_info *infop; /* Pointer to the appropriate info structure */
char *zOut; /* Rendering buffer */
int nOut; /* Size of the rendering buffer */
char *zExtra = 0; /* Malloced memory used by some conversion */
-#ifndef SQLITE_OMIT_FLOATING_POINT
- int exp, e2; /* exponent of real numbers */
- int nsd; /* Number of significant digits returned */
- double rounder; /* Used for rounding floating point values */
+ int exp, e2; /* exponent of real numbers */
etByte flag_dp; /* True if decimal point should be shown */
etByte flag_rtz; /* True if trailing zeros should be removed */
-#endif
+
PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */
char buf[etBUFSIZE]; /* Conversion buffer */
@@ -30652,95 +31291,62 @@ SQLITE_API void sqlite3_str_vappendf(
break;
case etFLOAT:
case etEXP:
- case etGENERIC:
+ case etGENERIC: {
+ FpDecode s;
+ int iRound;
+ int j;
+
if( bArgList ){
realvalue = getDoubleArg(pArgList);
}else{
realvalue = va_arg(ap,double);
}
-#ifdef SQLITE_OMIT_FLOATING_POINT
- length = 0;
-#else
if( precision<0 ) precision = 6; /* Set default precision */
#ifdef SQLITE_FP_PRECISION_LIMIT
if( precision>SQLITE_FP_PRECISION_LIMIT ){
precision = SQLITE_FP_PRECISION_LIMIT;
}
#endif
- if( realvalue<0.0 ){
- realvalue = -realvalue;
- prefix = '-';
+ if( xtype==etFLOAT ){
+ iRound = -precision;
+ }else if( xtype==etGENERIC ){
+ iRound = precision;
}else{
- prefix = flag_prefix;
+ iRound = precision+1;
}
- exp = 0;
- if( xtype==etGENERIC && precision>0 ) precision--;
- testcase( precision>0xfff );
- if( realvalue<1.0e+16
- && realvalue==(LONGDOUBLE_TYPE)(longvalue = (u64)realvalue)
- ){
- /* Number is a pure integer that can be represented as u64 */
- for(msd=1; msd*10<=longvalue; msd *= 10, exp++){}
- if( exp>precision && xtype!=etFLOAT ){
- u64 rnd = msd/2;
- int kk = precision;
- while( kk-- > 0 ){ rnd /= 10; }
- longvalue += rnd;
- }
- }else{
- msd = 0;
- longvalue = 0; /* To prevent a compiler warning */
- idx = precision & 0xfff;
- rounder = arRound[idx%10];
- while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; }
- if( xtype==etFLOAT ){
- double rx = (double)realvalue;
- sqlite3_uint64 u;
- int ex;
- memcpy(&u, &rx, sizeof(u));
- ex = -1023 + (int)((u>>52)&0x7ff);
- if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16;
- realvalue += rounder;
- }
- if( sqlite3IsNaN((double)realvalue) ){
- if( flag_zeropad ){
- bufpt = "null";
- length = 4;
+ sqlite3FpDecode(&s, realvalue, iRound, flag_altform2 ? 26 : 16);
+ if( s.isSpecial ){
+ if( s.isSpecial==2 ){
+ bufpt = flag_zeropad ? "null" : "NaN";
+ length = sqlite3Strlen30(bufpt);
+ break;
+ }else if( flag_zeropad ){
+ s.z[0] = '9';
+ s.iDP = 1000;
+ s.n = 1;
+ }else{
+ memcpy(buf, "-Inf", 5);
+ bufpt = buf;
+ if( s.sign=='-' ){
+ /* no-op */
+ }else if( flag_prefix ){
+ buf[0] = flag_prefix;
}else{
- bufpt = "NaN";
- length = 3;
+ bufpt++;
}
+ length = sqlite3Strlen30(bufpt);
break;
}
-
- /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */
- if( ALWAYS(realvalue>0.0) ){
- LONGDOUBLE_TYPE scale = 1.0;
- while( realvalue>=1e100*scale && exp<=350){ scale*=1e100;exp+=100;}
- while( realvalue>=1e10*scale && exp<=350 ){ scale*=1e10; exp+=10; }
- while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; }
- realvalue /= scale;
- while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; }
- while( realvalue<1.0 ){ realvalue *= 10.0; exp--; }
- if( exp>350 ){
- if( flag_zeropad ){
- realvalue = 9.0;
- exp = 999;
- }else{
- bufpt = buf;
- buf[0] = prefix;
- memcpy(buf+(prefix!=0),"Inf",4);
- length = 3+(prefix!=0);
- break;
- }
- }
- if( xtype!=etFLOAT ){
- realvalue += rounder;
- if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; }
- }
- }
+ }
+ if( s.sign=='-' ){
+ prefix = '-';
+ }else{
+ prefix = flag_prefix;
}
+ exp = s.iDP-1;
+ if( xtype==etGENERIC && precision>0 ) precision--;
+
/*
** If the field type is etGENERIC, then convert to either etEXP
** or etFLOAT, as appropriate.
@@ -30759,9 +31365,8 @@ SQLITE_API void sqlite3_str_vappendf(
if( xtype==etEXP ){
e2 = 0;
}else{
- e2 = exp;
+ e2 = s.iDP - 1;
}
- nsd = 16 + flag_altform2*10;
bufpt = buf;
{
i64 szBufNeeded; /* Size of a temporary buffer needed */
@@ -30779,16 +31384,12 @@ SQLITE_API void sqlite3_str_vappendf(
*(bufpt++) = prefix;
}
/* Digits prior to the decimal point */
+ j = 0;
if( e2<0 ){
*(bufpt++) = '0';
- }else if( msd>0 ){
- for(; e2>=0; e2--){
- *(bufpt++) = et_getdigit_int(&longvalue,&msd);
- if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ',';
- }
}else{
for(; e2>=0; e2--){
- *(bufpt++) = et_getdigit(&realvalue,&nsd);
+ *(bufpt++) = j<s.n ? s.z[j++] : '0';
if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ',';
}
}
@@ -30798,19 +31399,12 @@ SQLITE_API void sqlite3_str_vappendf(
}
/* "0" digits after the decimal point but before the first
** significant digit of the number */
- for(e2++; e2<0; precision--, e2++){
- assert( precision>0 );
+ for(e2++; e2<0 && precision>0; precision--, e2++){
*(bufpt++) = '0';
}
/* Significant digits after the decimal point */
- if( msd>0 ){
- while( (precision--)>0 ){
- *(bufpt++) = et_getdigit_int(&longvalue,&msd);
- }
- }else{
- while( (precision--)>0 ){
- *(bufpt++) = et_getdigit(&realvalue,&nsd);
- }
+ while( (precision--)>0 ){
+ *(bufpt++) = j<s.n ? s.z[j++] : '0';
}
/* Remove trailing zeros and the "." if no digits follow the "." */
if( flag_rtz && flag_dp ){
@@ -30826,6 +31420,7 @@ SQLITE_API void sqlite3_str_vappendf(
}
/* Add the "eNNN" suffix */
if( xtype==etEXP ){
+ exp = s.iDP - 1;
*(bufpt++) = aDigits[infop->charset];
if( exp<0 ){
*(bufpt++) = '-'; exp = -exp;
@@ -30859,8 +31454,8 @@ SQLITE_API void sqlite3_str_vappendf(
while( nPad-- ) bufpt[i++] = '0';
length = width;
}
-#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */
break;
+ }
case etSIZE:
if( !bArgList ){
*(va_arg(ap,int*)) = pAccum->nChar;
@@ -31584,6 +32179,75 @@ SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){
va_end(ap);
}
+
+/*****************************************************************************
+** Reference counted string/blob storage
+*****************************************************************************/
+
+/*
+** Increase the reference count of the string by one.
+**
+** The input parameter is returned.
+*/
+SQLITE_PRIVATE char *sqlite3RCStrRef(char *z){
+ RCStr *p = (RCStr*)z;
+ assert( p!=0 );
+ p--;
+ p->nRCRef++;
+ return z;
+}
+
+/*
+** Decrease the reference count by one. Free the string when the
+** reference count reaches zero.
+*/
+SQLITE_PRIVATE void sqlite3RCStrUnref(void *z){
+ RCStr *p = (RCStr*)z;
+ assert( p!=0 );
+ p--;
+ assert( p->nRCRef>0 );
+ if( p->nRCRef>=2 ){
+ p->nRCRef--;
+ }else{
+ sqlite3_free(p);
+ }
+}
+
+/*
+** Create a new string that is capable of holding N bytes of text, not counting
+** the zero byte at the end. The string is uninitialized.
+**
+** The reference count is initially 1. Call sqlite3RCStrUnref() to free the
+** newly allocated string.
+**
+** This routine returns 0 on an OOM.
+*/
+SQLITE_PRIVATE char *sqlite3RCStrNew(u64 N){
+ RCStr *p = sqlite3_malloc64( N + sizeof(*p) + 1 );
+ if( p==0 ) return 0;
+ p->nRCRef = 1;
+ return (char*)&p[1];
+}
+
+/*
+** Change the size of the string so that it is able to hold N bytes.
+** The string might be reallocated, so return the new allocation.
+*/
+SQLITE_PRIVATE char *sqlite3RCStrResize(char *z, u64 N){
+ RCStr *p = (RCStr*)z;
+ RCStr *pNew;
+ assert( p!=0 );
+ p--;
+ assert( p->nRCRef==1 );
+ pNew = sqlite3_realloc64(p, N+sizeof(RCStr)+1);
+ if( pNew==0 ){
+ sqlite3_free(p);
+ return 0;
+ }else{
+ return (char*)&pNew[1];
+ }
+}
+
/************** End of printf.c **********************************************/
/************** Begin file treeview.c ****************************************/
/*
@@ -32000,6 +32664,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
sqlite3TreeViewItem(pView, "FILTER", 1);
sqlite3TreeViewExpr(pView, pWin->pFilter, 0);
sqlite3TreeViewPop(&pView);
+ if( pWin->eFrmType==TK_FILTER ) return;
}
sqlite3TreeViewPush(&pView, more);
if( pWin->zName ){
@@ -32009,7 +32674,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
}
if( pWin->zBase ) nElement++;
if( pWin->pOrderBy ) nElement++;
- if( pWin->eFrmType ) nElement++;
+ if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ) nElement++;
if( pWin->eExclude ) nElement++;
if( pWin->zBase ){
sqlite3TreeViewPush(&pView, (--nElement)>0);
@@ -32022,7 +32687,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
if( pWin->pOrderBy ){
sqlite3TreeViewExprList(pView, pWin->pOrderBy, (--nElement)>0, "ORDER-BY");
}
- if( pWin->eFrmType ){
+ if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ){
char zBuf[30];
const char *zFrmType = "ROWS";
if( pWin->eFrmType==TK_RANGE ) zFrmType = "RANGE";
@@ -32231,7 +32896,8 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
};
assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT );
assert( pExpr->pRight );
- assert( sqlite3ExprSkipCollate(pExpr->pRight)->op==TK_TRUEFALSE );
+ assert( sqlite3ExprSkipCollateAndLikely(pExpr->pRight)->op
+ == TK_TRUEFALSE );
x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight);
zUniOp = azOp[x];
break;
@@ -32269,7 +32935,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
assert( ExprUseXList(pExpr) );
pFarg = pExpr->x.pList;
#ifndef SQLITE_OMIT_WINDOWFUNC
- pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0;
+ pWin = IsWindowFunc(pExpr) ? pExpr->y.pWin : 0;
#else
pWin = 0;
#endif
@@ -32295,7 +32961,13 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewLine(pView, "FUNCTION %Q%s", pExpr->u.zToken, zFlgs);
}
if( pFarg ){
- sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0);
+ sqlite3TreeViewExprList(pView, pFarg, pWin!=0 || pExpr->pLeft, 0);
+ if( pExpr->pLeft ){
+ Expr *pOB = pExpr->pLeft;
+ assert( pOB->op==TK_ORDER );
+ assert( ExprUseXList(pOB) );
+ sqlite3TreeViewExprList(pView, pOB->x.pList, pWin!=0, "ORDERBY");
+ }
}
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pWin ){
@@ -32304,6 +32976,10 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
#endif
break;
}
+ case TK_ORDER: {
+ sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, "ORDERBY");
+ break;
+ }
#ifndef SQLITE_OMIT_SUBQUERY
case TK_EXISTS: {
assert( ExprUseXSelect(pExpr) );
@@ -32357,7 +33033,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
assert( pExpr->x.pList->nExpr==2 );
pY = pExpr->x.pList->a[0].pExpr;
pZ = pExpr->x.pList->a[1].pExpr;
- sqlite3TreeViewLine(pView, "BETWEEN");
+ sqlite3TreeViewLine(pView, "BETWEEN%s", zFlgs);
sqlite3TreeViewExpr(pView, pX, 1);
sqlite3TreeViewExpr(pView, pY, 1);
sqlite3TreeViewExpr(pView, pZ, 0);
@@ -33492,7 +34168,38 @@ SQLITE_PRIVATE u32 sqlite3Utf8Read(
return c;
}
-
+/*
+** Read a single UTF8 character out of buffer z[], but reading no
+** more than n characters from the buffer. z[] is not zero-terminated.
+**
+** Return the number of bytes used to construct the character.
+**
+** Invalid UTF8 might generate a strange result. No effort is made
+** to detect invalid UTF8.
+**
+** At most 4 bytes will be read out of z[]. The return value will always
+** be between 1 and 4.
+*/
+SQLITE_PRIVATE int sqlite3Utf8ReadLimited(
+ const u8 *z,
+ int n,
+ u32 *piOut
+){
+ u32 c;
+ int i = 1;
+ assert( n>0 );
+ c = z[0];
+ if( c>=0xc0 ){
+ c = sqlite3Utf8Trans1[c-0xc0];
+ if( n>4 ) n = 4;
+ while( i<n && (z[i] & 0xc0)==0x80 ){
+ c = (c<<6) + (0x3f & z[i]);
+ i++;
+ }
+ }
+ *piOut = c;
+ return i;
+}
/*
@@ -33890,7 +34597,7 @@ SQLITE_PRIVATE void sqlite3UtfSelfTest(void){
/*
** Calls to sqlite3FaultSim() are used to simulate a failure during testing,
** or to bypass normal error detection during testing in order to let
-** execute proceed futher downstream.
+** execute proceed further downstream.
**
** In deployment, sqlite3FaultSim() *always* return SQLITE_OK (0). The
** sqlite3FaultSim() function only returns non-zero during testing.
@@ -34007,6 +34714,23 @@ SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3 *db){
*/
SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){
if( rc==SQLITE_IOERR_NOMEM ) return;
+#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL)
+ if( rc==SQLITE_IOERR_IN_PAGE ){
+ int ii;
+ int iErr;
+ sqlite3BtreeEnterAll(db);
+ for(ii=0; ii<db->nDb; ii++){
+ if( db->aDb[ii].pBt ){
+ iErr = sqlite3PagerWalSystemErrno(sqlite3BtreePager(db->aDb[ii].pBt));
+ if( iErr ){
+ db->iSysErrno = iErr;
+ }
+ }
+ }
+ sqlite3BtreeLeaveAll(db);
+ return;
+ }
+#endif
rc &= 0xff;
if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){
db->iSysErrno = sqlite3OsGetLastError(db->pVfs);
@@ -34051,12 +34775,16 @@ SQLITE_PRIVATE void sqlite3ProgressCheck(Parse *p){
p->rc = SQLITE_INTERRUPT;
}
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
- if( db->xProgress && (++p->nProgressSteps)>=db->nProgressOps ){
- if( db->xProgress(db->pProgressArg) ){
- p->nErr++;
- p->rc = SQLITE_INTERRUPT;
+ if( db->xProgress ){
+ if( p->rc==SQLITE_INTERRUPT ){
+ p->nProgressSteps = 0;
+ }else if( (++p->nProgressSteps)>=db->nProgressOps ){
+ if( db->xProgress(db->pProgressArg) ){
+ p->nErr++;
+ p->rc = SQLITE_INTERRUPT;
+ }
+ p->nProgressSteps = 0;
}
- p->nProgressSteps = 0;
}
#endif
}
@@ -34252,43 +34980,40 @@ SQLITE_PRIVATE u8 sqlite3StrIHash(const char *z){
return h;
}
-/*
-** Compute 10 to the E-th power. Examples: E==1 results in 10.
-** E==2 results in 100. E==50 results in 1.0e50.
+/* Double-Double multiplication. (x[0],x[1]) *= (y,yy)
**
-** This routine only works for values of E between 1 and 341.
+** Reference:
+** T. J. Dekker, "A Floating-Point Technique for Extending the
+** Available Precision". 1971-07-26.
*/
-static LONGDOUBLE_TYPE sqlite3Pow10(int E){
-#if defined(_MSC_VER)
- static const LONGDOUBLE_TYPE x[] = {
- 1.0e+001L,
- 1.0e+002L,
- 1.0e+004L,
- 1.0e+008L,
- 1.0e+016L,
- 1.0e+032L,
- 1.0e+064L,
- 1.0e+128L,
- 1.0e+256L
- };
- LONGDOUBLE_TYPE r = 1.0;
- int i;
- assert( E>=0 && E<=307 );
- for(i=0; E!=0; i++, E >>=1){
- if( E & 1 ) r *= x[i];
- }
- return r;
-#else
- LONGDOUBLE_TYPE x = 10.0;
- LONGDOUBLE_TYPE r = 1.0;
- while(1){
- if( E & 1 ) r *= x;
- E >>= 1;
- if( E==0 ) break;
- x *= x;
- }
- return r;
-#endif
+static void dekkerMul2(volatile double *x, double y, double yy){
+ /*
+ ** The "volatile" keywords on parameter x[] and on local variables
+ ** below are needed force intermediate results to be truncated to
+ ** binary64 rather than be carried around in an extended-precision
+ ** format. The truncation is necessary for the Dekker algorithm to
+ ** work. Intel x86 floating point might omit the truncation without
+ ** the use of volatile.
+ */
+ volatile double tx, ty, p, q, c, cc;
+ double hx, hy;
+ u64 m;
+ memcpy(&m, (void*)&x[0], 8);
+ m &= 0xfffffffffc000000LL;
+ memcpy(&hx, &m, 8);
+ tx = x[0] - hx;
+ memcpy(&m, &y, 8);
+ m &= 0xfffffffffc000000LL;
+ memcpy(&hy, &m, 8);
+ ty = y - hy;
+ p = hx*hy;
+ q = hx*ty + tx*hy;
+ c = p+q;
+ cc = p - c + q + tx*ty;
+ cc = x[0]*yy + x[1]*y + cc;
+ x[0] = c + cc;
+ x[1] = c - x[0];
+ x[1] += cc;
}
/*
@@ -34329,12 +35054,11 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
const char *zEnd;
/* sign * significand * (10 ^ (esign * exponent)) */
int sign = 1; /* sign of significand */
- i64 s = 0; /* significand */
+ u64 s = 0; /* significand */
int d = 0; /* adjust exponent for shifting decimal point */
int esign = 1; /* sign of exponent */
int e = 0; /* exponent */
int eValid = 1; /* True exponent is either not used or is well-formed */
- double result;
int nDigit = 0; /* Number of digits processed */
int eType = 1; /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */
@@ -34374,7 +35098,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
while( z<zEnd && sqlite3Isdigit(*z) ){
s = s*10 + (*z - '0');
z+=incr; nDigit++;
- if( s>=((LARGEST_INT64-9)/10) ){
+ if( s>=((LARGEST_UINT64-9)/10) ){
/* skip non-significant significand digits
** (increase exponent by d to shift decimal left) */
while( z<zEnd && sqlite3Isdigit(*z) ){ z+=incr; d++; }
@@ -34389,7 +35113,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
/* copy digits from after decimal to significand
** (decrease exponent by d to shift decimal right) */
while( z<zEnd && sqlite3Isdigit(*z) ){
- if( s<((LARGEST_INT64-9)/10) ){
+ if( s<((LARGEST_UINT64-9)/10) ){
s = s*10 + (*z - '0');
d--;
nDigit++;
@@ -34429,79 +35153,89 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en
while( z<zEnd && sqlite3Isspace(*z) ) z+=incr;
do_atof_calc:
- /* adjust exponent by d, and update sign */
- e = (e*esign) + d;
- if( e<0 ) {
- esign = -1;
- e *= -1;
- } else {
- esign = 1;
+ /* Zero is a special case */
+ if( s==0 ){
+ *pResult = sign<0 ? -0.0 : +0.0;
+ goto atof_return;
}
- if( s==0 ) {
- /* In the IEEE 754 standard, zero is signed. */
- result = sign<0 ? -(double)0 : (double)0;
- } else {
- /* Attempt to reduce exponent.
- **
- ** Branches that are not required for the correct answer but which only
- ** help to obtain the correct answer faster are marked with special
- ** comments, as a hint to the mutation tester.
- */
- while( e>0 ){ /*OPTIMIZATION-IF-TRUE*/
- if( esign>0 ){
- if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/
- s *= 10;
- }else{
- if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/
- s /= 10;
- }
- e--;
- }
+ /* adjust exponent by d, and update sign */
+ e = (e*esign) + d;
- /* adjust the sign of significand */
- s = sign<0 ? -s : s;
+ /* Try to adjust the exponent to make it smaller */
+ while( e>0 && s<(LARGEST_UINT64/10) ){
+ s *= 10;
+ e--;
+ }
+ while( e<0 && (s%10)==0 ){
+ s /= 10;
+ e++;
+ }
- if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/
- result = (double)s;
+ if( e==0 ){
+ *pResult = s;
+ }else if( sqlite3Config.bUseLongDouble ){
+ LONGDOUBLE_TYPE r = (LONGDOUBLE_TYPE)s;
+ if( e>0 ){
+ while( e>=100 ){ e-=100; r *= 1.0e+100L; }
+ while( e>=10 ){ e-=10; r *= 1.0e+10L; }
+ while( e>=1 ){ e-=1; r *= 1.0e+01L; }
}else{
- /* attempt to handle extremely small/large numbers better */
- if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/
- if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/
- LONGDOUBLE_TYPE scale = sqlite3Pow10(e-308);
- if( esign<0 ){
- result = s / scale;
- result /= 1.0e+308;
- }else{
- result = s * scale;
- result *= 1.0e+308;
- }
- }else{ assert( e>=342 );
- if( esign<0 ){
- result = 0.0*s;
- }else{
+ while( e<=-100 ){ e+=100; r *= 1.0e-100L; }
+ while( e<=-10 ){ e+=10; r *= 1.0e-10L; }
+ while( e<=-1 ){ e+=1; r *= 1.0e-01L; }
+ }
+ assert( r>=0.0 );
+ if( r>+1.7976931348623157081452742373e+308L ){
#ifdef INFINITY
- result = INFINITY*s;
+ *pResult = +INFINITY;
#else
- result = 1e308*1e308*s; /* Infinity */
+ *pResult = 1.0e308*10.0;
#endif
- }
- }
- }else{
- LONGDOUBLE_TYPE scale = sqlite3Pow10(e);
- if( esign<0 ){
- result = s / scale;
- }else{
- result = s * scale;
- }
+ }else{
+ *pResult = (double)r;
+ }
+ }else{
+ double rr[2];
+ u64 s2;
+ rr[0] = (double)s;
+ s2 = (u64)rr[0];
+ rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s);
+ if( e>0 ){
+ while( e>=100 ){
+ e -= 100;
+ dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83);
+ }
+ while( e>=10 ){
+ e -= 10;
+ dekkerMul2(rr, 1.0e+10, 0.0);
+ }
+ while( e>=1 ){
+ e -= 1;
+ dekkerMul2(rr, 1.0e+01, 0.0);
+ }
+ }else{
+ while( e<=-100 ){
+ e += 100;
+ dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117);
+ }
+ while( e<=-10 ){
+ e += 10;
+ dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27);
+ }
+ while( e<=-1 ){
+ e += 1;
+ dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18);
}
}
+ *pResult = rr[0]+rr[1];
+ if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300;
}
+ if( sign<0 ) *pResult = -*pResult;
+ assert( !sqlite3IsNaN(*pResult) );
- /* store the result */
- *pResult = result;
-
- /* return true if number and no extra non-whitespace chracters after */
+atof_return:
+ /* return true if number and no extra non-whitespace characters after */
if( z==zEnd && nDigit>0 && eValid && eType>0 ){
return eType;
}else if( eType>=2 && (eType==3 || eValid) && nDigit>0 ){
@@ -34637,7 +35371,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc
/* This test and assignment is needed only to suppress UB warnings
** from clang and -fsanitize=undefined. This test and assignment make
** the code a little larger and slower, and no harm comes from omitting
- ** them, but we must appaise the undefined-behavior pharisees. */
+ ** them, but we must appease the undefined-behavior pharisees. */
*pNum = neg ? SMALLEST_INT64 : LARGEST_INT64;
}else if( neg ){
*pNum = -(i64)u;
@@ -34715,7 +35449,9 @@ SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){
}else
#endif /* SQLITE_OMIT_HEX_INTEGER */
{
- return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8);
+ int n = (int)(0x3fffffff&strspn(z,"+- \n\t0123456789"));
+ if( z[n] ) n++;
+ return sqlite3Atoi64(z, pOut, n, SQLITE_UTF8);
}
}
@@ -34795,6 +35531,153 @@ SQLITE_PRIVATE int sqlite3Atoi(const char *z){
}
/*
+** Decode a floating-point value into an approximate decimal
+** representation.
+**
+** Round the decimal representation to n significant digits if
+** n is positive. Or round to -n signficant digits after the
+** decimal point if n is negative. No rounding is performed if
+** n is zero.
+**
+** The significant digits of the decimal representation are
+** stored in p->z[] which is a often (but not always) a pointer
+** into the middle of p->zBuf[]. There are p->n significant digits.
+** The p->z[] array is *not* zero-terminated.
+*/
+SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRound){
+ int i;
+ u64 v;
+ int e, exp = 0;
+ p->isSpecial = 0;
+ p->z = p->zBuf;
+
+ /* Convert negative numbers to positive. Deal with Infinity, 0.0, and
+ ** NaN. */
+ if( r<0.0 ){
+ p->sign = '-';
+ r = -r;
+ }else if( r==0.0 ){
+ p->sign = '+';
+ p->n = 1;
+ p->iDP = 1;
+ p->z = "0";
+ return;
+ }else{
+ p->sign = '+';
+ }
+ memcpy(&v,&r,8);
+ e = v>>52;
+ if( (e&0x7ff)==0x7ff ){
+ p->isSpecial = 1 + (v!=0x7ff0000000000000LL);
+ p->n = 0;
+ p->iDP = 0;
+ return;
+ }
+
+ /* Multiply r by powers of ten until it lands somewhere in between
+ ** 1.0e+19 and 1.0e+17.
+ */
+ if( sqlite3Config.bUseLongDouble ){
+ LONGDOUBLE_TYPE rr = r;
+ if( rr>=1.0e+19 ){
+ while( rr>=1.0e+119L ){ exp+=100; rr *= 1.0e-100L; }
+ while( rr>=1.0e+29L ){ exp+=10; rr *= 1.0e-10L; }
+ while( rr>=1.0e+19L ){ exp++; rr *= 1.0e-1L; }
+ }else{
+ while( rr<1.0e-97L ){ exp-=100; rr *= 1.0e+100L; }
+ while( rr<1.0e+07L ){ exp-=10; rr *= 1.0e+10L; }
+ while( rr<1.0e+17L ){ exp--; rr *= 1.0e+1L; }
+ }
+ v = (u64)rr;
+ }else{
+ /* If high-precision floating point is not available using "long double",
+ ** then use Dekker-style double-double computation to increase the
+ ** precision.
+ **
+ ** The error terms on constants like 1.0e+100 computed using the
+ ** decimal extension, for example as follows:
+ **
+ ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100)));
+ */
+ double rr[2];
+ rr[0] = r;
+ rr[1] = 0.0;
+ if( rr[0]>9.223372036854774784e+18 ){
+ while( rr[0]>9.223372036854774784e+118 ){
+ exp += 100;
+ dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117);
+ }
+ while( rr[0]>9.223372036854774784e+28 ){
+ exp += 10;
+ dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27);
+ }
+ while( rr[0]>9.223372036854774784e+18 ){
+ exp += 1;
+ dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18);
+ }
+ }else{
+ while( rr[0]<9.223372036854774784e-83 ){
+ exp -= 100;
+ dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83);
+ }
+ while( rr[0]<9.223372036854774784e+07 ){
+ exp -= 10;
+ dekkerMul2(rr, 1.0e+10, 0.0);
+ }
+ while( rr[0]<9.22337203685477478e+17 ){
+ exp -= 1;
+ dekkerMul2(rr, 1.0e+01, 0.0);
+ }
+ }
+ v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1];
+ }
+
+
+ /* Extract significant digits. */
+ i = sizeof(p->zBuf)-1;
+ assert( v>0 );
+ while( v ){ p->zBuf[i--] = (v%10) + '0'; v /= 10; }
+ assert( i>=0 && i<sizeof(p->zBuf)-1 );
+ p->n = sizeof(p->zBuf) - 1 - i;
+ assert( p->n>0 );
+ assert( p->n<sizeof(p->zBuf) );
+ p->iDP = p->n + exp;
+ if( iRound<0 ){
+ iRound = p->iDP - iRound;
+ if( iRound==0 && p->zBuf[i+1]>='5' ){
+ iRound = 1;
+ p->zBuf[i--] = '0';
+ p->n++;
+ p->iDP++;
+ }
+ }
+ if( iRound>0 && (iRound<p->n || p->n>mxRound) ){
+ char *z = &p->zBuf[i+1];
+ if( iRound>mxRound ) iRound = mxRound;
+ p->n = iRound;
+ if( z[iRound]>='5' ){
+ int j = iRound-1;
+ while( 1 /*exit-by-break*/ ){
+ z[j]++;
+ if( z[j]<='9' ) break;
+ z[j] = '0';
+ if( j==0 ){
+ p->z[i--] = '1';
+ p->n++;
+ p->iDP++;
+ break;
+ }else{
+ j--;
+ }
+ }
+ }
+ }
+ p->z = &p->zBuf[i+1];
+ assert( i+p->n < sizeof(p->zBuf) );
+ while( ALWAYS(p->n>0) && p->z[p->n-1]=='0' ){ p->n--; }
+}
+
+/*
** Try to convert z into an unsigned 32-bit integer. Return true on
** success and false if there is an error.
**
@@ -35057,121 +35940,32 @@ SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){
** this function assumes the single-byte case has already been handled.
*/
SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){
- u32 a,b;
+ u64 v64;
+ u8 n;
- /* The 1-byte case. Overwhelmingly the most common. Handled inline
- ** by the getVarin32() macro */
- a = *p;
- /* a: p0 (unmasked) */
-#ifndef getVarint32
- if (!(a&0x80))
- {
- /* Values between 0 and 127 */
- *v = a;
- return 1;
- }
-#endif
+ /* Assume that the single-byte case has already been handled by
+ ** the getVarint32() macro */
+ assert( (p[0] & 0x80)!=0 );
- /* The 2-byte case */
- p++;
- b = *p;
- /* b: p1 (unmasked) */
- if (!(b&0x80))
- {
- /* Values between 128 and 16383 */
- a &= 0x7f;
- a = a<<7;
- *v = a | b;
+ if( (p[1] & 0x80)==0 ){
+ /* This is the two-byte case */
+ *v = ((p[0]&0x7f)<<7) | p[1];
return 2;
}
-
- /* The 3-byte case */
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<14 | p2 (unmasked) */
- if (!(a&0x80))
- {
- /* Values between 16384 and 2097151 */
- a &= (0x7f<<14)|(0x7f);
- b &= 0x7f;
- b = b<<7;
- *v = a | b;
+ if( (p[2] & 0x80)==0 ){
+ /* This is the three-byte case */
+ *v = ((p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2];
return 3;
}
-
- /* A 32-bit varint is used to store size information in btrees.
- ** Objects are rarely larger than 2MiB limit of a 3-byte varint.
- ** A 3-byte varint is sufficient, for example, to record the size
- ** of a 1048569-byte BLOB or string.
- **
- ** We only unroll the first 1-, 2-, and 3- byte cases. The very
- ** rare larger cases can be handled by the slower 64-bit varint
- ** routine.
- */
-#if 1
- {
- u64 v64;
- u8 n;
-
- n = sqlite3GetVarint(p-2, &v64);
- assert( n>3 && n<=9 );
- if( (v64 & SQLITE_MAX_U32)!=v64 ){
- *v = 0xffffffff;
- }else{
- *v = (u32)v64;
- }
- return n;
- }
-
-#else
- /* For following code (kept for historical record only) shows an
- ** unrolling for the 3- and 4-byte varint cases. This code is
- ** slightly faster, but it is also larger and much harder to test.
- */
- p++;
- b = b<<14;
- b |= *p;
- /* b: p1<<14 | p3 (unmasked) */
- if (!(b&0x80))
- {
- /* Values between 2097152 and 268435455 */
- b &= (0x7f<<14)|(0x7f);
- a &= (0x7f<<14)|(0x7f);
- a = a<<7;
- *v = a | b;
- return 4;
- }
-
- p++;
- a = a<<14;
- a |= *p;
- /* a: p0<<28 | p2<<14 | p4 (unmasked) */
- if (!(a&0x80))
- {
- /* Values between 268435456 and 34359738367 */
- a &= SLOT_4_2_0;
- b &= SLOT_4_2_0;
- b = b<<7;
- *v = a | b;
- return 5;
- }
-
- /* We can only reach this point when reading a corrupt database
- ** file. In that case we are not in any hurry. Use the (relatively
- ** slow) general-purpose sqlite3GetVarint() routine to extract the
- ** value. */
- {
- u64 v64;
- u8 n;
-
- p -= 4;
- n = sqlite3GetVarint(p, &v64);
- assert( n>5 && n<=9 );
+ /* four or more bytes */
+ n = sqlite3GetVarint(p, &v64);
+ assert( n>3 && n<=9 );
+ if( (v64 & SQLITE_MAX_U32)!=v64 ){
+ *v = 0xffffffff;
+ }else{
*v = (u32)v64;
- return n;
}
-#endif
+ return n;
}
/*
@@ -35322,7 +36116,7 @@ SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){
}
/*
-** Attempt to add, substract, or multiply the 64-bit signed value iB against
+** Attempt to add, subtract, or multiply the 64-bit signed value iB against
** the other 64-bit signed integer at *pA and store the result in *pA.
** Return 0 on success. Or if the operation would have resulted in an
** overflow, leave *pA unchanged and return 1.
@@ -35635,7 +36429,7 @@ SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nNam
#define SQLITE_HWTIME_H
/*
-** The following routine only works on pentium-class (or newer) processors.
+** The following routine only works on Pentium-class (or newer) processors.
** It uses the RDTSC opcode to read the cycle count value out of the
** processor and returns that value. This can be used for high-res
** profiling.
@@ -35807,7 +36601,7 @@ static void insertElement(
}
-/* Resize the hash table so that it cantains "new_size" buckets.
+/* Resize the hash table so that it contains "new_size" buckets.
**
** The hash table might fail to resize if sqlite3_malloc() fails or
** if the new size is the same as the prior size.
@@ -36167,19 +36961,22 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 171 */ "VCreate" OpHelp(""),
/* 172 */ "VDestroy" OpHelp(""),
/* 173 */ "VOpen" OpHelp(""),
- /* 174 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"),
- /* 175 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 176 */ "VRename" OpHelp(""),
- /* 177 */ "Pagecount" OpHelp(""),
- /* 178 */ "MaxPgcnt" OpHelp(""),
- /* 179 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"),
- /* 180 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"),
- /* 181 */ "Trace" OpHelp(""),
- /* 182 */ "CursorHint" OpHelp(""),
- /* 183 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
- /* 184 */ "Noop" OpHelp(""),
- /* 185 */ "Explain" OpHelp(""),
- /* 186 */ "Abortable" OpHelp(""),
+ /* 174 */ "VCheck" OpHelp(""),
+ /* 175 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"),
+ /* 176 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 177 */ "VRename" OpHelp(""),
+ /* 178 */ "Pagecount" OpHelp(""),
+ /* 179 */ "MaxPgcnt" OpHelp(""),
+ /* 180 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"),
+ /* 181 */ "GetSubtype" OpHelp("r[P2] = r[P1].subtype"),
+ /* 182 */ "SetSubtype" OpHelp("r[P2].subtype = r[P1]"),
+ /* 183 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"),
+ /* 184 */ "Trace" OpHelp(""),
+ /* 185 */ "CursorHint" OpHelp(""),
+ /* 186 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
+ /* 187 */ "Noop" OpHelp(""),
+ /* 188 */ "Explain" OpHelp(""),
+ /* 189 */ "Abortable" OpHelp(""),
};
return azName[i];
}
@@ -37193,7 +37990,7 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void){
** This source file is organized into divisions where the logic for various
** subfunctions is contained within the appropriate division. PLEASE
** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed
-** in the correct division and should be clearly labeled.
+** in the correct division and should be clearly labelled.
**
** The layout of divisions is as follows:
**
@@ -37780,7 +38577,7 @@ static int robustFchown(int fd, uid_t uid, gid_t gid){
/*
** This is the xSetSystemCall() method of sqlite3_vfs for all of the
-** "unix" VFSes. Return SQLITE_OK opon successfully updating the
+** "unix" VFSes. Return SQLITE_OK upon successfully updating the
** system call pointer, or SQLITE_NOTFOUND if there is no configurable
** system call named zName.
*/
@@ -38302,7 +39099,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){
** If you close a file descriptor that points to a file that has locks,
** all locks on that file that are owned by the current process are
** released. To work around this problem, each unixInodeInfo object
-** maintains a count of the number of pending locks on tha inode.
+** maintains a count of the number of pending locks on the inode.
** When an attempt is made to close an unixFile, if there are
** other unixFile open on the same inode that are holding locks, the call
** to close() the file descriptor is deferred until all of the locks clear.
@@ -38316,7 +39113,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){
** not posix compliant. Under LinuxThreads, a lock created by thread
** A cannot be modified or overridden by a different thread B.
** Only thread A can modify the lock. Locking behavior is correct
-** if the appliation uses the newer Native Posix Thread Library (NPTL)
+** if the application uses the newer Native Posix Thread Library (NPTL)
** on linux - with NPTL a lock created by thread A can override locks
** in thread B. But there is no way to know at compile-time which
** threading library is being used. So there is no way to know at
@@ -38518,7 +39315,7 @@ static void storeLastErrno(unixFile *pFile, int error){
}
/*
-** Close all file descriptors accumuated in the unixInodeInfo->pUnused list.
+** Close all file descriptors accumulated in the unixInodeInfo->pUnused list.
*/
static void closePendingFds(unixFile *pFile){
unixInodeInfo *pInode = pFile->pInode;
@@ -38881,7 +39678,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** slightly in order to be compatible with Windows95 systems simultaneously
** accessing the same database file, in case that is ever required.
**
- ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved
+ ** Symbols defined in os.h identify the 'pending byte' and the 'reserved
** byte', each single bytes at well known offsets, and the 'shared byte
** range', a range of 510 bytes at a well known offset.
**
@@ -38889,7 +39686,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** byte'. If this is successful, 'shared byte range' is read-locked
** and the lock on the 'pending byte' released. (Legacy note: When
** SQLite was first developed, Windows95 systems were still very common,
- ** and Widnows95 lacks a shared-lock capability. So on Windows95, a
+ ** and Windows95 lacks a shared-lock capability. So on Windows95, a
** single randomly selected by from the 'shared byte range' is locked.
** Windows95 is now pretty much extinct, but this work-around for the
** lack of shared-locks on Windows95 lives on, for backwards
@@ -38910,7 +39707,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** obtaining a write-lock on the 'pending byte'. This ensures that no new
** SHARED locks can be obtained, but existing SHARED locks are allowed to
** persist. If the call to this function fails to obtain the EXCLUSIVE
- ** lock in this case, it holds the PENDING lock intead. The client may
+ ** lock in this case, it holds the PENDING lock instead. The client may
** then re-attempt the EXCLUSIVE lock later on, after existing SHARED
** locks have cleared.
*/
@@ -38938,7 +39735,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
/* Make sure the locking sequence is correct.
** (1) We never move from unlocked to anything higher than shared lock.
- ** (2) SQLite never explicitly requests a pendig lock.
+ ** (2) SQLite never explicitly requests a pending lock.
** (3) A shared lock is always held when a reserve lock is requested.
*/
assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
@@ -40156,7 +40953,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
/* Make sure the locking sequence is correct
** (1) We never move from unlocked to anything higher than shared lock.
- ** (2) SQLite never explicitly requests a pendig lock.
+ ** (2) SQLite never explicitly requests a pending lock.
** (3) A shared lock is always held when a reserve lock is requested.
*/
assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
@@ -40272,7 +41069,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST +
pInode->sharedByte, 1, 0)) ){
int failed2 = SQLITE_OK;
- /* now attemmpt to get the exclusive lock range */
+ /* now attempt to get the exclusive lock range */
failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST,
SHARED_SIZE, 1);
if( failed && (failed2 = afpSetLock(context->dbPath, pFile,
@@ -40321,9 +41118,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
unixInodeInfo *pInode;
afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
int skipShared = 0;
-#ifdef SQLITE_TEST
- int h = pFile->h;
-#endif
assert( pFile );
OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock,
@@ -40339,9 +41133,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
assert( pInode->nShared!=0 );
if( pFile->eFileLock>SHARED_LOCK ){
assert( pInode->eFileLock==pFile->eFileLock );
- SimulateIOErrorBenign(1);
- SimulateIOError( h=(-1) )
- SimulateIOErrorBenign(0);
#ifdef SQLITE_DEBUG
/* When reducing a lock such that other processes can start
@@ -40390,9 +41181,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte;
pInode->nShared--;
if( pInode->nShared==0 ){
- SimulateIOErrorBenign(1);
- SimulateIOError( h=(-1) )
- SimulateIOErrorBenign(0);
if( !skipShared ){
rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0);
}
@@ -40567,7 +41355,7 @@ static int unixRead(
#endif
#if SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this read request as possible by transfering
+ /* Deal with as much of this read request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -40719,7 +41507,7 @@ static int unixWrite(
#endif
#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this write request as possible by transfering
+ /* Deal with as much of this write request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -40841,7 +41629,7 @@ static int full_fsync(int fd, int fullSync, int dataOnly){
/* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a
** no-op. But go ahead and call fstat() to validate the file
** descriptor as we need a method to provoke a failure during
- ** coverate testing.
+ ** coverage testing.
*/
#ifdef SQLITE_NO_SYNC
{
@@ -41234,7 +42022,13 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
case SQLITE_FCNTL_LOCK_TIMEOUT: {
int iOld = pFile->iBusyTimeout;
+#if SQLITE_ENABLE_SETLK_TIMEOUT==1
pFile->iBusyTimeout = *(int*)pArg;
+#elif SQLITE_ENABLE_SETLK_TIMEOUT==2
+ pFile->iBusyTimeout = !!(*(int*)pArg);
+#else
+# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2"
+#endif
*(int*)pArg = iOld;
return SQLITE_OK;
}
@@ -41487,6 +42281,25 @@ static int unixGetpagesize(void){
** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and
** unixMutexHeld() is true when reading or writing any other field
** in this structure.
+**
+** aLock[SQLITE_SHM_NLOCK]:
+** This array records the various locks held by clients on each of the
+** SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no
+** locks are held by the process on this slot. If it is set to -1, then
+** some client holds an EXCLUSIVE lock on the locking slot. If the aLock[]
+** value is set to a positive value, then it is the number of shared
+** locks currently held on the slot.
+**
+** aMutex[SQLITE_SHM_NLOCK]:
+** Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex
+** pShmMutex is used to protect the aLock[] array and the right to
+** call fcntl() on unixShmNode.hShm to obtain or release locks.
+**
+** If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array
+** of mutexes - one for each locking slot. To read or write locking
+** slot aLock[iSlot], the caller must hold the corresponding mutex
+** aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a
+** lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held.
*/
struct unixShmNode {
unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */
@@ -41500,10 +42313,11 @@ struct unixShmNode {
char **apRegion; /* Array of mapped shared-memory regions */
int nRef; /* Number of unixShm objects pointing to this */
unixShm *pFirst; /* All unixShm objects pointing to this */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ sqlite3_mutex *aMutex[SQLITE_SHM_NLOCK];
+#endif
int aLock[SQLITE_SHM_NLOCK]; /* # shared locks on slot, -1==excl lock */
#ifdef SQLITE_DEBUG
- u8 exclMask; /* Mask of exclusive locks held */
- u8 sharedMask; /* Mask of shared locks held */
u8 nextShmId; /* Next available unixShm.id value */
#endif
};
@@ -41586,16 +42400,35 @@ static int unixShmSystemLock(
struct flock f; /* The posix advisory locking structure */
int rc = SQLITE_OK; /* Result code form fcntl() */
- /* Access to the unixShmNode object is serialized by the caller */
pShmNode = pFile->pInode->pShmNode;
- assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
- assert( pShmNode->nRef>0 || unixMutexHeld() );
+
+ /* Assert that the parameters are within expected range and that the
+ ** correct mutex or mutexes are held. */
+ assert( pShmNode->nRef>=0 );
+ assert( (ofst==UNIX_SHM_DMS && n==1)
+ || (ofst>=UNIX_SHM_BASE && ofst+n<=(UNIX_SHM_BASE+SQLITE_SHM_NLOCK))
+ );
+ if( ofst==UNIX_SHM_DMS ){
+ assert( pShmNode->nRef>0 || unixMutexHeld() );
+ assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
+ }else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int ii;
+ for(ii=ofst-UNIX_SHM_BASE; ii<ofst-UNIX_SHM_BASE+n; ii++){
+ assert( sqlite3_mutex_held(pShmNode->aMutex[ii]) );
+ }
+#else
+ assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
+ assert( pShmNode->nRef>0 );
+#endif
+ }
/* Shared locks never span more than one byte */
assert( n==1 || lockType!=F_RDLCK );
/* Locks are within range */
assert( n>=1 && n<=SQLITE_SHM_NLOCK );
+ assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) );
if( pShmNode->hShm>=0 ){
int res;
@@ -41606,7 +42439,7 @@ static int unixShmSystemLock(
f.l_len = n;
res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile);
if( res==-1 ){
-#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) && SQLITE_ENABLE_SETLK_TIMEOUT==1
rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY);
#else
rc = SQLITE_BUSY;
@@ -41614,39 +42447,28 @@ static int unixShmSystemLock(
}
}
- /* Update the global lock state and do debug tracing */
+ /* Do debug tracing */
#ifdef SQLITE_DEBUG
- { u16 mask;
OSTRACE(("SHM-LOCK "));
- mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<<ofst);
if( rc==SQLITE_OK ){
if( lockType==F_UNLCK ){
- OSTRACE(("unlock %d ok", ofst));
- pShmNode->exclMask &= ~mask;
- pShmNode->sharedMask &= ~mask;
+ OSTRACE(("unlock %d..%d ok\n", ofst, ofst+n-1));
}else if( lockType==F_RDLCK ){
- OSTRACE(("read-lock %d ok", ofst));
- pShmNode->exclMask &= ~mask;
- pShmNode->sharedMask |= mask;
+ OSTRACE(("read-lock %d..%d ok\n", ofst, ofst+n-1));
}else{
assert( lockType==F_WRLCK );
- OSTRACE(("write-lock %d ok", ofst));
- pShmNode->exclMask |= mask;
- pShmNode->sharedMask &= ~mask;
+ OSTRACE(("write-lock %d..%d ok\n", ofst, ofst+n-1));
}
}else{
if( lockType==F_UNLCK ){
- OSTRACE(("unlock %d failed", ofst));
+ OSTRACE(("unlock %d..%d failed\n", ofst, ofst+n-1));
}else if( lockType==F_RDLCK ){
- OSTRACE(("read-lock failed"));
+ OSTRACE(("read-lock %d..%d failed\n", ofst, ofst+n-1));
}else{
assert( lockType==F_WRLCK );
- OSTRACE(("write-lock %d failed", ofst));
+ OSTRACE(("write-lock %d..%d failed\n", ofst, ofst+n-1));
}
}
- OSTRACE((" - afterwards %03x,%03x\n",
- pShmNode->sharedMask, pShmNode->exclMask));
- }
#endif
return rc;
@@ -41683,6 +42505,11 @@ static void unixShmPurge(unixFile *pFd){
int i;
assert( p->pInode==pFd->pInode );
sqlite3_mutex_free(p->pShmMutex);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ for(i=0; i<SQLITE_SHM_NLOCK; i++){
+ sqlite3_mutex_free(p->aMutex[i]);
+ }
+#endif
for(i=0; i<p->nRegion; i+=nShmPerMap){
if( p->hShm>=0 ){
osMunmap(p->apRegion[i], p->szRegion);
@@ -41742,7 +42569,20 @@ static int unixLockSharedMemory(unixFile *pDbFd, unixShmNode *pShmNode){
pShmNode->isUnlocked = 1;
rc = SQLITE_READONLY_CANTINIT;
}else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* Do not use a blocking lock here. If the lock cannot be obtained
+ ** immediately, it means some other connection is truncating the
+ ** *-shm file. And after it has done so, it will not release its
+ ** lock, but only downgrade it to a shared lock. So no point in
+ ** blocking here. The call below to obtain the shared DMS lock may
+ ** use a blocking lock. */
+ int iSaveTimeout = pDbFd->iBusyTimeout;
+ pDbFd->iBusyTimeout = 0;
+#endif
rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ pDbFd->iBusyTimeout = iSaveTimeout;
+#endif
/* The first connection to attach must truncate the -shm file. We
** truncate to 3 bytes (an arbitrary small number, less than the
** -shm header size) rather than 0 as a system debugging aid, to
@@ -41863,6 +42703,18 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
rc = SQLITE_NOMEM_BKPT;
goto shm_open_err;
}
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ {
+ int ii;
+ for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+ pShmNode->aMutex[ii] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+ if( pShmNode->aMutex[ii]==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto shm_open_err;
+ }
+ }
+ }
+#endif
}
if( pInode->bProcessLock==0 ){
@@ -42084,9 +42936,11 @@ shmpage_out:
*/
#ifdef SQLITE_DEBUG
static int assertLockingArrayOk(unixShmNode *pShmNode){
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ return 1;
+#else
unixShm *pX;
int aLock[SQLITE_SHM_NLOCK];
- assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
memset(aLock, 0, sizeof(aLock));
for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
@@ -42104,13 +42958,14 @@ static int assertLockingArrayOk(unixShmNode *pShmNode){
assert( 0==memcmp(pShmNode->aLock, aLock, sizeof(aLock)) );
return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0);
+#endif
}
#endif
/*
** Change the lock state for a shared-memory segment.
**
-** Note that the relationship between SHAREd and EXCLUSIVE locks is a little
+** Note that the relationship between SHARED and EXCLUSIVE locks is a little
** different here than in posix. In xShmLock(), one can go from unlocked
** to shared and back or from unlocked to exclusive and back. But one may
** not go from shared to exclusive or from exclusive to shared.
@@ -42125,7 +42980,7 @@ static int unixShmLock(
unixShm *p; /* The shared memory being locked */
unixShmNode *pShmNode; /* The underlying file iNode */
int rc = SQLITE_OK; /* Result code */
- u16 mask; /* Mask of locks to take or release */
+ u16 mask = (1<<(ofst+n)) - (1<<ofst); /* Mask of locks to take or release */
int *aLock;
p = pDbFd->pShm;
@@ -42160,88 +43015,151 @@ static int unixShmLock(
** It is not permitted to block on the RECOVER lock.
*/
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
- assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
- (ofst!=2) /* not RECOVER */
- && (ofst!=1 || (p->exclMask|p->sharedMask)==0)
- && (ofst!=0 || (p->exclMask|p->sharedMask)<3)
- && (ofst<3 || (p->exclMask|p->sharedMask)<(1<<ofst))
- ));
+ {
+ u16 lockMask = (p->exclMask|p->sharedMask);
+ assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
+ (ofst!=2) /* not RECOVER */
+ && (ofst!=1 || lockMask==0 || lockMask==2)
+ && (ofst!=0 || lockMask<3)
+ && (ofst<3 || lockMask<(1<<ofst))
+ ));
+ }
#endif
- mask = (1<<(ofst+n)) - (1<<ofst);
- assert( n>1 || mask==(1<<ofst) );
- sqlite3_mutex_enter(pShmNode->pShmMutex);
- assert( assertLockingArrayOk(pShmNode) );
- if( flags & SQLITE_SHM_UNLOCK ){
- if( (p->exclMask|p->sharedMask) & mask ){
- int ii;
- int bUnlock = 1;
+ /* Check if there is any work to do. There are three cases:
+ **
+ ** a) An unlock operation where there are locks to unlock,
+ ** b) An shared lock where the requested lock is not already held
+ ** c) An exclusive lock where the requested lock is not already held
+ **
+ ** The SQLite core never requests an exclusive lock that it already holds.
+ ** This is assert()ed below.
+ */
+ assert( flags!=(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)
+ || 0==(p->exclMask & mask)
+ );
+ if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask))
+ || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask))
+ || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK))
+ ){
- for(ii=ofst; ii<ofst+n; ii++){
- if( aLock[ii]>((p->sharedMask & (1<<ii)) ? 1 : 0) ){
- bUnlock = 0;
- }
+ /* Take the required mutexes. In SETLK_TIMEOUT mode (blocking locks), if
+ ** this is an attempt on an exclusive lock use sqlite3_mutex_try(). If any
+ ** other thread is holding this mutex, then it is either holding or about
+ ** to hold a lock exclusive to the one being requested, and we may
+ ** therefore return SQLITE_BUSY to the caller.
+ **
+ ** Doing this prevents some deadlock scenarios. For example, thread 1 may
+ ** be a checkpointer blocked waiting on the WRITER lock. And thread 2
+ ** may be a normal SQL client upgrading to a write transaction. In this
+ ** case thread 2 does a non-blocking request for the WRITER lock. But -
+ ** if it were to use sqlite3_mutex_enter() then it would effectively
+ ** become a (doomed) blocking request, as thread 2 would block until thread
+ ** 1 obtained WRITER and released the mutex. Since thread 2 already holds
+ ** a lock on a read-locking slot at this point, this breaks the
+ ** anti-deadlock rules (see above). */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int iMutex;
+ for(iMutex=ofst; iMutex<ofst+n; iMutex++){
+ if( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) ){
+ rc = sqlite3_mutex_try(pShmNode->aMutex[iMutex]);
+ if( rc!=SQLITE_OK ) goto leave_shmnode_mutexes;
+ }else{
+ sqlite3_mutex_enter(pShmNode->aMutex[iMutex]);
}
+ }
+#else
+ sqlite3_mutex_enter(pShmNode->pShmMutex);
+#endif
- if( bUnlock ){
- rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
- if( rc==SQLITE_OK ){
- memset(&aLock[ofst], 0, sizeof(int)*n);
+ if( ALWAYS(rc==SQLITE_OK) ){
+ if( flags & SQLITE_SHM_UNLOCK ){
+ /* Case (a) - unlock. */
+ int bUnlock = 1;
+ assert( (p->exclMask & p->sharedMask)==0 );
+ assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask );
+ assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask );
+
+ /* If this is a SHARED lock being unlocked, it is possible that other
+ ** clients within this process are holding the same SHARED lock. In
+ ** this case, set bUnlock to 0 so that the posix lock is not removed
+ ** from the file-descriptor below. */
+ if( flags & SQLITE_SHM_SHARED ){
+ assert( n==1 );
+ assert( aLock[ofst]>=1 );
+ if( aLock[ofst]>1 ){
+ bUnlock = 0;
+ aLock[ofst]--;
+ p->sharedMask &= ~mask;
+ }
}
- }else if( ALWAYS(p->sharedMask & (1<<ofst)) ){
- assert( n==1 && aLock[ofst]>1 );
- aLock[ofst]--;
- }
- /* Undo the local locks */
- if( rc==SQLITE_OK ){
- p->exclMask &= ~mask;
- p->sharedMask &= ~mask;
- }
- }
- }else if( flags & SQLITE_SHM_SHARED ){
- assert( n==1 );
- assert( (p->exclMask & (1<<ofst))==0 );
- if( (p->sharedMask & mask)==0 ){
- if( aLock[ofst]<0 ){
- rc = SQLITE_BUSY;
- }else if( aLock[ofst]==0 ){
- rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
- }
+ if( bUnlock ){
+ rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ memset(&aLock[ofst], 0, sizeof(int)*n);
+ p->sharedMask &= ~mask;
+ p->exclMask &= ~mask;
+ }
+ }
+ }else if( flags & SQLITE_SHM_SHARED ){
+ /* Case (b) - a shared lock. */
- /* Get the local shared locks */
- if( rc==SQLITE_OK ){
- p->sharedMask |= mask;
- aLock[ofst]++;
- }
- }
- }else{
- /* Make sure no sibling connections hold locks that will block this
- ** lock. If any do, return SQLITE_BUSY right away. */
- int ii;
- for(ii=ofst; ii<ofst+n; ii++){
- assert( (p->sharedMask & mask)==0 );
- if( ALWAYS((p->exclMask & (1<<ii))==0) && aLock[ii] ){
- rc = SQLITE_BUSY;
- break;
- }
- }
+ if( aLock[ofst]<0 ){
+ /* An exclusive lock is held by some other connection. BUSY. */
+ rc = SQLITE_BUSY;
+ }else if( aLock[ofst]==0 ){
+ rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
+ }
- /* Get the exclusive locks at the system level. Then if successful
- ** also update the in-memory values. */
- if( rc==SQLITE_OK ){
- rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
- if( rc==SQLITE_OK ){
+ /* Get the local shared locks */
+ if( rc==SQLITE_OK ){
+ p->sharedMask |= mask;
+ aLock[ofst]++;
+ }
+ }else{
+ /* Case (c) - an exclusive lock. */
+ int ii;
+
+ assert( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) );
assert( (p->sharedMask & mask)==0 );
- p->exclMask |= mask;
+ assert( (p->exclMask & mask)==0 );
+
+ /* Make sure no sibling connections hold locks that will block this
+ ** lock. If any do, return SQLITE_BUSY right away. */
for(ii=ofst; ii<ofst+n; ii++){
- aLock[ii] = -1;
+ if( aLock[ii] ){
+ rc = SQLITE_BUSY;
+ break;
+ }
+ }
+
+ /* Get the exclusive locks at the system level. Then if successful
+ ** also update the in-memory values. */
+ if( rc==SQLITE_OK ){
+ rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
+ if( rc==SQLITE_OK ){
+ p->exclMask |= mask;
+ for(ii=ofst; ii<ofst+n; ii++){
+ aLock[ii] = -1;
+ }
+ }
}
}
+ assert( assertLockingArrayOk(pShmNode) );
}
+
+ /* Drop the mutexes acquired above. */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ leave_shmnode_mutexes:
+ for(iMutex--; iMutex>=ofst; iMutex--){
+ sqlite3_mutex_leave(pShmNode->aMutex[iMutex]);
+ }
+#else
+ sqlite3_mutex_leave(pShmNode->pShmMutex);
+#endif
}
- assert( assertLockingArrayOk(pShmNode) );
- sqlite3_mutex_leave(pShmNode->pShmMutex);
+
OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n",
p->id, osGetpid(0), p->sharedMask, p->exclMask));
return rc;
@@ -42491,11 +43409,16 @@ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
#if SQLITE_MAX_MMAP_SIZE>0
if( pFd->mmapSizeMax>0 ){
+ /* Ensure that there is always at least a 256 byte buffer of addressable
+ ** memory following the returned page. If the database is corrupt,
+ ** SQLite may overread the page slightly (in practice only a few bytes,
+ ** but 256 is safe, round, number). */
+ const int nEofBuffer = 256;
if( pFd->pMapRegion==0 ){
int rc = unixMapfile(pFd, -1);
if( rc!=SQLITE_OK ) return rc;
}
- if( pFd->mmapSize >= iOff+nAmt ){
+ if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){
*pp = &((u8 *)pFd->pMapRegion)[iOff];
pFd->nFetchOut++;
}
@@ -43886,12 +44809,17 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){
** than the argument.
*/
static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){
-#if OS_VXWORKS || _POSIX_C_SOURCE >= 199309L
+#if !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP+0
struct timespec sp;
-
sp.tv_sec = microseconds / 1000000;
sp.tv_nsec = (microseconds % 1000000) * 1000;
+
+ /* Almost all modern unix systems support nanosleep(). But if you are
+ ** compiling for one of the rare exceptions, you can use
+ ** -DHAVE_NANOSLEEP=0 (perhaps in conjuction with -DHAVE_USLEEP if
+ ** usleep() is available) in order to bypass the use of nanosleep() */
nanosleep(&sp, NULL);
+
UNUSED_PARAMETER(NotUsed);
return microseconds;
#elif defined(HAVE_USLEEP) && HAVE_USLEEP
@@ -46481,7 +47409,7 @@ static struct win_syscall {
/*
** This is the xSetSystemCall() method of sqlite3_vfs for all of the
-** "win32" VFSes. Return SQLITE_OK opon successfully updating the
+** "win32" VFSes. Return SQLITE_OK upon successfully updating the
** system call pointer, or SQLITE_NOTFOUND if there is no configurable
** system call named zName.
*/
@@ -48061,7 +48989,7 @@ static int winRead(
pFile->h, pBuf, amt, offset, pFile->locktype));
#if SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this read request as possible by transfering
+ /* Deal with as much of this read request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -48139,7 +49067,7 @@ static int winWrite(
pFile->h, pBuf, amt, offset, pFile->locktype));
#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
- /* Deal with as much of this write request as possible by transfering
+ /* Deal with as much of this write request as possible by transferring
** data from the memory mapping using memcpy(). */
if( offset<pFile->mmapSize ){
if( offset+amt <= pFile->mmapSize ){
@@ -48249,7 +49177,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
** all references to memory-mapped content are closed. That is doable,
** but involves adding a few branches in the common write code path which
** could slow down normal operations slightly. Hence, we have decided for
- ** now to simply make trancations a no-op if there are pending reads. We
+ ** now to simply make transactions a no-op if there are pending reads. We
** can maybe revisit this decision in the future.
*/
return SQLITE_OK;
@@ -48308,7 +49236,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
#ifdef SQLITE_TEST
/*
** Count the number of fullsyncs and normal syncs. This is used to test
-** that syncs and fullsyncs are occuring at the right times.
+** that syncs and fullsyncs are occurring at the right times.
*/
SQLITE_API int sqlite3_sync_count = 0;
SQLITE_API int sqlite3_fullsync_count = 0;
@@ -48665,7 +49593,7 @@ static int winLock(sqlite3_file *id, int locktype){
*/
if( locktype==EXCLUSIVE_LOCK && res ){
assert( pFile->locktype>=SHARED_LOCK );
- res = winUnlockReadLock(pFile);
+ (void)winUnlockReadLock(pFile);
res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0,
SHARED_SIZE, 0);
if( res ){
@@ -49843,6 +50771,11 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
#if SQLITE_MAX_MMAP_SIZE>0
if( pFd->mmapSizeMax>0 ){
+ /* Ensure that there is always at least a 256 byte buffer of addressable
+ ** memory following the returned page. If the database is corrupt,
+ ** SQLite may overread the page slightly (in practice only a few bytes,
+ ** but 256 is safe, round, number). */
+ const int nEofBuffer = 256;
if( pFd->pMapRegion==0 ){
int rc = winMapfile(pFd, -1);
if( rc!=SQLITE_OK ){
@@ -49851,7 +50784,7 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
return rc;
}
}
- if( pFd->mmapSize >= iOff+nAmt ){
+ if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){
assert( pFd->pMapRegion!=0 );
*pp = &((u8 *)pFd->pMapRegion)[iOff];
pFd->nFetchOut++;
@@ -50069,6 +51002,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789";
size_t i, j;
+ DWORD pid;
int nPre = sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX);
int nMax, nBuf, nDir, nLen;
char *zBuf;
@@ -50281,7 +51215,10 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
j = sqlite3Strlen30(zBuf);
sqlite3_randomness(15, &zBuf[j]);
+ pid = osGetCurrentProcessId();
for(i=0; i<15; i++, j++){
+ zBuf[j] += pid & 0xff;
+ pid >>= 8;
zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
}
zBuf[j] = 0;
@@ -52646,7 +53583,7 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){
h = BITVEC_HASH(i++);
/* if there wasn't a hash collision, and this doesn't */
/* completely fill the hash, then just add it without */
- /* worring about sub-dividing and re-hashing. */
+ /* worrying about sub-dividing and re-hashing. */
if( !p->u.aHash[h] ){
if (p->nSet<(BITVEC_NINT-1)) {
goto bitvec_set_end;
@@ -52979,7 +53916,7 @@ struct PCache {
** Return 1 if pPg is on the dirty list for pCache. Return 0 if not.
** This routine runs inside of assert() statements only.
*/
-#ifdef SQLITE_DEBUG
+#if defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
PgHdr *p;
for(p=pCache->pDirty; p; p=p->pDirtyNext){
@@ -52987,6 +53924,16 @@ static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
}
return 0;
}
+static int pageNotOnDirtyList(PCache *pCache, PgHdr *pPg){
+ PgHdr *p;
+ for(p=pCache->pDirty; p; p=p->pDirtyNext){
+ if( p==pPg ) return 0;
+ }
+ return 1;
+}
+#else
+# define pageOnDirtyList(A,B) 1
+# define pageNotOnDirtyList(A,B) 1
#endif
/*
@@ -53007,7 +53954,7 @@ SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){
assert( pCache!=0 ); /* Every page has an associated PCache */
if( pPg->flags & PGHDR_CLEAN ){
assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
- assert( !pageOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirty list */
+ assert( pageNotOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirtylist */
}else{
assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */
assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg );
@@ -53143,7 +54090,7 @@ static int numberOfCachePages(PCache *p){
return p->szCache;
}else{
i64 n;
- /* IMPLEMANTATION-OF: R-59858-46238 If the argument N is negative, then the
+ /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
** number of cache pages is adjusted to be a number of pages that would
** use approximately abs(N*1024) bytes of memory based on the current
** page size. */
@@ -53631,7 +54578,7 @@ static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
}
/*
-** Sort the list of pages in accending order by pgno. Pages are
+** Sort the list of pages in ascending order by pgno. Pages are
** connected by pDirty pointers. The pDirtyPrev pointers are
** corrupted by this sort.
**
@@ -53871,7 +54818,7 @@ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHd
** If N is positive, then N pages worth of memory are allocated using a single
** sqlite3Malloc() call and that memory is used for the first N pages allocated.
** Or if N is negative, then -1024*N bytes of memory are allocated and used
-** for as many pages as can be accomodated.
+** for as many pages as can be accommodated.
**
** Only one of (2) or (3) can be used. Once the memory available to (2) or
** (3) is exhausted, subsequent allocations fail over to the general-purpose
@@ -53905,7 +54852,7 @@ typedef struct PGroup PGroup;
** in memory directly after the associated page data, if the database is
** corrupt, code at the b-tree layer may overread the page buffer and
** read part of this structure before the corruption is detected. This
-** can cause a valgrind error if the unitialized gap is accessed. Using u16
+** can cause a valgrind error if the uninitialized gap is accessed. Using u16
** ensures there is no such gap, and therefore no bytes of uninitialized
** memory in the structure.
**
@@ -55125,7 +56072,7 @@ SQLITE_PRIVATE void sqlite3PcacheStats(
** The TEST primitive includes a "batch" number. The TEST primitive
** will only see elements that were inserted before the last change
** in the batch number. In other words, if an INSERT occurs between
-** two TESTs where the TESTs have the same batch nubmer, then the
+** two TESTs where the TESTs have the same batch number, then the
** value added by the INSERT will not be visible to the second TEST.
** The initial batch number is zero, so if the very first TEST contains
** a non-zero batch number, it will see all prior INSERTs.
@@ -55657,6 +56604,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64
# define sqlite3WalFramesize(z) 0
# define sqlite3WalFindFrame(x,y,z) 0
# define sqlite3WalFile(x) 0
+# undef SQLITE_USE_SEH
#else
#define WAL_SAVEPOINT_NDATA 4
@@ -55763,6 +56711,10 @@ SQLITE_PRIVATE int sqlite3WalWriteLock(Wal *pWal, int bLock);
SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db);
#endif
+#ifdef SQLITE_USE_SEH
+SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal*);
+#endif
+
#endif /* ifndef SQLITE_OMIT_WAL */
#endif /* SQLITE_WAL_H */
@@ -56048,7 +57000,7 @@ int sqlite3PagerTrace=1; /* True to enable tracing */
** outstanding transactions have been abandoned, the pager is able to
** transition back to OPEN state, discarding the contents of the
** page-cache and any other in-memory state at the same time. Everything
-** is reloaded from disk (and, if necessary, hot-journal rollback peformed)
+** is reloaded from disk (and, if necessary, hot-journal rollback performed)
** when a read-transaction is next opened on the pager (transitioning
** the pager into READER state). At that point the system has recovered
** from the error.
@@ -56435,7 +57387,7 @@ struct Pager {
char *zJournal; /* Name of the journal file */
int (*xBusyHandler)(void*); /* Function to call when busy */
void *pBusyHandlerArg; /* Context argument for xBusyHandler */
- int aStat[4]; /* Total cache hits, misses, writes, spills */
+ u32 aStat[4]; /* Total cache hits, misses, writes, spills */
#ifdef SQLITE_TEST
int nRead; /* Database pages read */
#endif
@@ -56565,9 +57517,8 @@ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){
#ifndef SQLITE_OMIT_WAL
if( pPager->pWal ){
u32 iRead = 0;
- int rc;
- rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
- return (rc==SQLITE_OK && iRead==0);
+ (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
+ return iRead==0;
}
#endif
return 1;
@@ -57239,9 +58190,32 @@ static int writeJournalHdr(Pager *pPager){
memset(zHeader, 0, sizeof(aJournalMagic)+4);
}
+
+
/* The random check-hash initializer */
- sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ if( pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){
+ sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ }
+#ifdef SQLITE_DEBUG
+ else{
+ /* The Pager.cksumInit variable is usually randomized above to protect
+ ** against there being existing records in the journal file. This is
+ ** dangerous, as following a crash they may be mistaken for records
+ ** written by the current transaction and rolled back into the database
+ ** file, causing corruption. The following assert statements verify
+ ** that this is not required in "journal_mode=memory" mode, as in that
+ ** case the journal file is always 0 bytes in size at this point.
+ ** It is advantageous to avoid the sqlite3_randomness() call if possible
+ ** as it takes the global PRNG mutex. */
+ i64 sz = 0;
+ sqlite3OsFileSize(pPager->jfd, &sz);
+ assert( sz==0 );
+ assert( pPager->journalOff==journalHdrOffset(pPager) );
+ assert( sqlite3JournalIsInMemory(pPager->jfd) );
+ }
+#endif
put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit);
+
/* The initial database size */
put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize);
/* The assumed sector size for this process */
@@ -57421,7 +58395,7 @@ static int readJournalHdr(
** + 4 bytes: super-journal name checksum.
** + 8 bytes: aJournalMagic[].
**
-** The super-journal page checksum is the sum of the bytes in thesuper-journal
+** The super-journal page checksum is the sum of the bytes in the super-journal
** name, where each byte is interpreted as a signed 8-bit integer.
**
** If zSuper is a NULL pointer (occurs for a single database transaction),
@@ -57474,7 +58448,7 @@ static int writeSuperJournal(Pager *pPager, const char *zSuper){
}
pPager->journalOff += (nSuper+20);
- /* If the pager is in peristent-journal mode, then the physical
+ /* If the pager is in persistent-journal mode, then the physical
** journal-file may extend past the end of the super-journal name
** and 8 bytes of magic data just written to the file. This is
** dangerous because the code to rollback a hot-journal file
@@ -57644,7 +58618,7 @@ static void pager_unlock(Pager *pPager){
/*
** This function is called whenever an IOERR or FULL error that requires
-** the pager to transition into the ERROR state may ahve occurred.
+** the pager to transition into the ERROR state may have occurred.
** The first argument is a pointer to the pager structure, the second
** the error-code about to be returned by a pager API function. The
** value returned is a copy of the second argument to this function.
@@ -57885,6 +58859,9 @@ static int pager_end_transaction(Pager *pPager, int hasSuper, int bCommit){
return (rc==SQLITE_OK?rc2:rc);
}
+/* Forward reference */
+static int pager_playback(Pager *pPager, int isHot);
+
/*
** Execute a rollback if a transaction is active and unlock the
** database file.
@@ -57913,13 +58890,28 @@ static void pagerUnlockAndRollback(Pager *pPager){
assert( pPager->eState==PAGER_READER );
pager_end_transaction(pPager, 0, 0);
}
+ }else if( pPager->eState==PAGER_ERROR
+ && pPager->journalMode==PAGER_JOURNALMODE_MEMORY
+ && isOpen(pPager->jfd)
+ ){
+ /* Special case for a ROLLBACK due to I/O error with an in-memory
+ ** journal: We have to rollback immediately, before the journal is
+ ** closed, because once it is closed, all content is forgotten. */
+ int errCode = pPager->errCode;
+ u8 eLock = pPager->eLock;
+ pPager->eState = PAGER_OPEN;
+ pPager->errCode = SQLITE_OK;
+ pPager->eLock = EXCLUSIVE_LOCK;
+ pager_playback(pPager, 1);
+ pPager->errCode = errCode;
+ pPager->eLock = eLock;
}
pager_unlock(pPager);
}
/*
** Parameter aData must point to a buffer of pPager->pageSize bytes
-** of data. Compute and return a checksum based ont the contents of the
+** of data. Compute and return a checksum based on the contents of the
** page of data and the current value of pPager->cksumInit.
**
** This is not a real checksum. It is really just the sum of the
@@ -58885,7 +59877,7 @@ static int pagerWalFrames(
assert( pPager->pWal );
assert( pList );
#ifdef SQLITE_DEBUG
- /* Verify that the page list is in accending order */
+ /* Verify that the page list is in ascending order */
for(p=pList; p && p->pDirty; p=p->pDirty){
assert( p->pgno < p->pDirty->pgno );
}
@@ -59016,7 +60008,7 @@ static int pagerPagecount(Pager *pPager, Pgno *pnPage){
#ifndef SQLITE_OMIT_WAL
/*
** Check if the *-wal file that corresponds to the database opened by pPager
-** exists if the database is not empy, or verify that the *-wal file does
+** exists if the database is not empty, or verify that the *-wal file does
** not exist (by deleting it) if the database file is empty.
**
** If the database is not empty and the *-wal file exists, open the pager
@@ -60426,11 +61418,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
int rc = SQLITE_OK; /* Return code */
int tempFile = 0; /* True for temp files (incl. in-memory files) */
int memDb = 0; /* True if this is an in-memory file */
-#ifndef SQLITE_OMIT_DESERIALIZE
int memJM = 0; /* Memory journal mode */
-#else
-# define memJM 0
-#endif
int readOnly = 0; /* True if this is a read-only file */
int journalFileSize; /* Bytes to allocate for each journal fd */
char *zPathname = 0; /* Full path to database file */
@@ -60549,12 +61537,13 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
** specific formatting and order of the various filenames, so if the format
** changes here, be sure to change it there as well.
*/
+ assert( SQLITE_PTRSIZE==sizeof(Pager*) );
pPtr = (u8 *)sqlite3MallocZero(
ROUND8(sizeof(*pPager)) + /* Pager structure */
ROUND8(pcacheSize) + /* PCache object */
ROUND8(pVfs->szOsFile) + /* The main db file */
journalFileSize * 2 + /* The two journal files */
- sizeof(pPager) + /* Space to hold a pointer */
+ SQLITE_PTRSIZE + /* Space to hold a pointer */
4 + /* Database prefix */
nPathname + 1 + /* database filename */
nUriByte + /* query parameters */
@@ -60575,7 +61564,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
pPager->sjfd = (sqlite3_file*)pPtr; pPtr += journalFileSize;
pPager->jfd = (sqlite3_file*)pPtr; pPtr += journalFileSize;
assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) );
- memcpy(pPtr, &pPager, sizeof(pPager)); pPtr += sizeof(pPager);
+ memcpy(pPtr, &pPager, SQLITE_PTRSIZE); pPtr += SQLITE_PTRSIZE;
/* Fill in the Pager.zFilename and pPager.zQueryParam fields */
pPtr += 4; /* Skip zero prefix */
@@ -60629,9 +61618,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
int fout = 0; /* VFS flags returned by xOpen() */
rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout);
assert( !memDb );
-#ifndef SQLITE_OMIT_DESERIALIZE
pPager->memVfs = memJM = (fout&SQLITE_OPEN_MEMORY)!=0;
-#endif
readOnly = (fout&SQLITE_OPEN_READONLY)!=0;
/* If the file was successfully opened for read/write access,
@@ -60768,15 +61755,18 @@ act_like_temp_file:
/*
** Return the sqlite3_file for the main database given the name
-** of the corresonding WAL or Journal name as passed into
+** of the corresponding WAL or Journal name as passed into
** xOpen.
*/
SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){
Pager *pPager;
+ const char *p;
while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){
zName--;
}
- pPager = *(Pager**)(zName - 4 - sizeof(Pager*));
+ p = zName - 4 - sizeof(Pager*);
+ assert( EIGHT_BYTE_ALIGNMENT(p) );
+ pPager = *(Pager**)p;
return pPager->fd;
}
@@ -61410,8 +62400,20 @@ SQLITE_PRIVATE int sqlite3PagerGet(
DbPage **ppPage, /* Write a pointer to the page here */
int flags /* PAGER_GET_XXX flags */
){
- /* printf("PAGE %u\n", pgno); fflush(stdout); */
+#if 0 /* Trace page fetch by setting to 1 */
+ int rc;
+ printf("PAGE %u\n", pgno);
+ fflush(stdout);
+ rc = pPager->xGet(pPager, pgno, ppPage, flags);
+ if( rc ){
+ printf("PAGE %u failed with 0x%02x\n", pgno, rc);
+ fflush(stdout);
+ }
+ return rc;
+#else
+ /* Normal, high-speed version of sqlite3PagerGet() */
return pPager->xGet(pPager, pgno, ppPage, flags);
+#endif
}
/*
@@ -62287,6 +63289,13 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, 0);
if( rc==SQLITE_OK ){
rc = pager_write_pagelist(pPager, pList);
+ if( rc==SQLITE_OK && pPager->dbSize>pPager->dbFileSize ){
+ char *pTmp = pPager->pTmpSpace;
+ int szPage = (int)pPager->pageSize;
+ memset(pTmp, 0, szPage);
+ rc = sqlite3OsWrite(pPager->fd, pTmp, szPage,
+ ((i64)pPager->dbSize*pPager->pageSize)-szPage);
+ }
if( rc==SQLITE_OK ){
rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0);
}
@@ -62521,11 +63530,11 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){
a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize;
a[4] = pPager->eState;
a[5] = pPager->errCode;
- a[6] = pPager->aStat[PAGER_STAT_HIT];
- a[7] = pPager->aStat[PAGER_STAT_MISS];
+ a[6] = (int)pPager->aStat[PAGER_STAT_HIT] & 0x7fffffff;
+ a[7] = (int)pPager->aStat[PAGER_STAT_MISS] & 0x7fffffff;
a[8] = 0; /* Used to be pPager->nOvfl */
a[9] = pPager->nRead;
- a[10] = pPager->aStat[PAGER_STAT_WRITE];
+ a[10] = (int)pPager->aStat[PAGER_STAT_WRITE] & 0x7fffffff;
return a;
}
#endif
@@ -62541,7 +63550,7 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){
** reset parameter is non-zero, the cache hit or miss count is zeroed before
** returning.
*/
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, u64 *pnVal){
assert( eStat==SQLITE_DBSTATUS_CACHE_HIT
|| eStat==SQLITE_DBSTATUS_CACHE_MISS
@@ -63053,7 +64062,7 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
assert( pPager->eState!=PAGER_ERROR );
pPager->journalMode = (u8)eMode;
- /* When transistioning from TRUNCATE or PERSIST to any other journal
+ /* When transitioning from TRUNCATE or PERSIST to any other journal
** mode except WAL, unless the pager is in locking_mode=exclusive mode,
** delete the journal file.
*/
@@ -63098,7 +64107,7 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
}
assert( state==pPager->eState );
}
- }else if( eMode==PAGER_JOURNALMODE_OFF ){
+ }else if( eMode==PAGER_JOURNALMODE_OFF || eMode==PAGER_JOURNALMODE_MEMORY ){
sqlite3OsClose(pPager->jfd);
}
}
@@ -63481,6 +64490,12 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){
}
#endif
+#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL)
+SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){
+ return sqlite3WalSystemErrno(pPager->pWal);
+}
+#endif
+
#endif /* SQLITE_OMIT_DISKIO */
/************** End of pager.c ***********************************************/
@@ -63771,7 +64786,7 @@ SQLITE_PRIVATE int sqlite3WalTrace = 0;
**
** Technically, the various VFSes are free to implement these locks however
** they see fit. However, compatibility is encouraged so that VFSes can
-** interoperate. The standard implemention used on both unix and windows
+** interoperate. The standard implementation used on both unix and windows
** is for the index number to indicate a byte offset into the
** WalCkptInfo.aLock[] array in the wal-index header. In other words, all
** locks are on the shm file. The WALINDEX_LOCK_OFFSET constant (which
@@ -63847,7 +64862,7 @@ struct WalIndexHdr {
** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff)
** for any aReadMark[] means that entry is unused. aReadMark[0] is
** a special case; its value is never used and it exists as a place-holder
-** to avoid having to offset aReadMark[] indexs by one. Readers holding
+** to avoid having to offset aReadMark[] indexes by one. Readers holding
** WAL_READ_LOCK(0) always ignore the entire WAL and read all content
** directly from the database.
**
@@ -64015,7 +65030,15 @@ struct Wal {
u32 iReCksum; /* On commit, recalculate checksums from here */
const char *zWalName; /* Name of WAL file */
u32 nCkpt; /* Checkpoint sequence counter in the wal-header */
+#ifdef SQLITE_USE_SEH
+ u32 lockMask; /* Mask of locks held */
+ void *pFree; /* Pointer to sqlite3_free() if exception thrown */
+ u32 *pWiValue; /* Value to write into apWiData[iWiPg] */
+ int iWiPg; /* Write pWiValue into apWiData[iWiPg] */
+ int iSysErrno; /* System error code following exception */
+#endif
#ifdef SQLITE_DEBUG
+ int nSehTry; /* Number of nested SEH_TRY{} blocks */
u8 lockError; /* True if a locking error has occurred */
#endif
#ifdef SQLITE_ENABLE_SNAPSHOT
@@ -64098,6 +65121,113 @@ struct WalIterator {
)
/*
+** Structured Exception Handling (SEH) is a Windows-specific technique
+** for catching exceptions raised while accessing memory-mapped files.
+**
+** The -DSQLITE_USE_SEH compile-time option means to use SEH to catch and
+** deal with system-level errors that arise during WAL -shm file processing.
+** Without this compile-time option, any system-level faults that appear
+** while accessing the memory-mapped -shm file will cause a process-wide
+** signal to be deliver, which will more than likely cause the entire
+** process to exit.
+*/
+#ifdef SQLITE_USE_SEH
+#include <Windows.h>
+
+/* Beginning of a block of code in which an exception might occur */
+# define SEH_TRY __try { \
+ assert( walAssertLockmask(pWal) && pWal->nSehTry==0 ); \
+ VVA_ONLY(pWal->nSehTry++);
+
+/* The end of a block of code in which an exception might occur */
+# define SEH_EXCEPT(X) \
+ VVA_ONLY(pWal->nSehTry--); \
+ assert( pWal->nSehTry==0 ); \
+ } __except( sehExceptionFilter(pWal, GetExceptionCode(), GetExceptionInformation() ) ){ X }
+
+/* Simulate a memory-mapping fault in the -shm file for testing purposes */
+# define SEH_INJECT_FAULT sehInjectFault(pWal)
+
+/*
+** The second argument is the return value of GetExceptionCode() for the
+** current exception. Return EXCEPTION_EXECUTE_HANDLER if the exception code
+** indicates that the exception may have been caused by accessing the *-shm
+** file mapping. Or EXCEPTION_CONTINUE_SEARCH otherwise.
+*/
+static int sehExceptionFilter(Wal *pWal, int eCode, EXCEPTION_POINTERS *p){
+ VVA_ONLY(pWal->nSehTry--);
+ if( eCode==EXCEPTION_IN_PAGE_ERROR ){
+ if( p && p->ExceptionRecord && p->ExceptionRecord->NumberParameters>=3 ){
+ /* From MSDN: For this type of exception, the first element of the
+ ** ExceptionInformation[] array is a read-write flag - 0 if the exception
+ ** was thrown while reading, 1 if while writing. The second element is
+ ** the virtual address being accessed. The "third array element specifies
+ ** the underlying NTSTATUS code that resulted in the exception". */
+ pWal->iSysErrno = (int)p->ExceptionRecord->ExceptionInformation[2];
+ }
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+/*
+** If one is configured, invoke the xTestCallback callback with 650 as
+** the argument. If it returns true, throw the same exception that is
+** thrown by the system if the *-shm file mapping is accessed after it
+** has been invalidated.
+*/
+static void sehInjectFault(Wal *pWal){
+ int res;
+ assert( pWal->nSehTry>0 );
+
+ res = sqlite3FaultSim(650);
+ if( res!=0 ){
+ ULONG_PTR aArg[3];
+ aArg[0] = 0;
+ aArg[1] = 0;
+ aArg[2] = (ULONG_PTR)res;
+ RaiseException(EXCEPTION_IN_PAGE_ERROR, 0, 3, (const ULONG_PTR*)aArg);
+ }
+}
+
+/*
+** There are two ways to use this macro. To set a pointer to be freed
+** if an exception is thrown:
+**
+** SEH_FREE_ON_ERROR(0, pPtr);
+**
+** and to cancel the same:
+**
+** SEH_FREE_ON_ERROR(pPtr, 0);
+**
+** In the first case, there must not already be a pointer registered to
+** be freed. In the second case, pPtr must be the registered pointer.
+*/
+#define SEH_FREE_ON_ERROR(X,Y) \
+ assert( (X==0 || Y==0) && pWal->pFree==X ); pWal->pFree = Y
+
+/*
+** There are two ways to use this macro. To arrange for pWal->apWiData[iPg]
+** to be set to pValue if an exception is thrown:
+**
+** SEH_SET_ON_ERROR(iPg, pValue);
+**
+** and to cancel the same:
+**
+** SEH_SET_ON_ERROR(0, 0);
+*/
+#define SEH_SET_ON_ERROR(X,Y) pWal->iWiPg = X; pWal->pWiValue = Y
+
+#else
+# define SEH_TRY VVA_ONLY(pWal->nSehTry++);
+# define SEH_EXCEPT(X) VVA_ONLY(pWal->nSehTry--); assert( pWal->nSehTry==0 );
+# define SEH_INJECT_FAULT assert( pWal->nSehTry>0 );
+# define SEH_FREE_ON_ERROR(X,Y)
+# define SEH_SET_ON_ERROR(X,Y)
+#endif /* ifdef SQLITE_USE_SEH */
+
+
+/*
** Obtain a pointer to the iPage'th page of the wal-index. The wal-index
** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are
** numbered from zero.
@@ -64169,6 +65299,7 @@ static int walIndexPage(
int iPage, /* The page we seek */
volatile u32 **ppPage /* Write the page pointer here */
){
+ SEH_INJECT_FAULT;
if( pWal->nWiData<=iPage || (*ppPage = pWal->apWiData[iPage])==0 ){
return walIndexPageRealloc(pWal, iPage, ppPage);
}
@@ -64180,6 +65311,7 @@ static int walIndexPage(
*/
static volatile WalCkptInfo *walCkptInfo(Wal *pWal){
assert( pWal->nWiData>0 && pWal->apWiData[0] );
+ SEH_INJECT_FAULT;
return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]);
}
@@ -64188,6 +65320,7 @@ static volatile WalCkptInfo *walCkptInfo(Wal *pWal){
*/
static volatile WalIndexHdr *walIndexHdr(Wal *pWal){
assert( pWal->nWiData>0 && pWal->apWiData[0] );
+ SEH_INJECT_FAULT;
return (volatile WalIndexHdr*)pWal->apWiData[0];
}
@@ -64377,7 +65510,7 @@ static int walDecodeFrame(
return 0;
}
- /* A frame is only valid if the page number is creater than zero.
+ /* A frame is only valid if the page number is greater than zero.
*/
pgno = sqlite3Get4byte(&aFrame[0]);
if( pgno==0 ){
@@ -64385,7 +65518,7 @@ static int walDecodeFrame(
}
/* A frame is only valid if a checksum of the WAL header,
- ** all prior frams, the first 16 bytes of this frame-header,
+ ** all prior frames, the first 16 bytes of this frame-header,
** and the frame-data matches the checksum in the last 8
** bytes of this frame-header.
*/
@@ -64445,12 +65578,18 @@ static int walLockShared(Wal *pWal, int lockIdx){
WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal,
walLockName(lockIdx), rc ? "failed" : "ok"));
VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); )
+#ifdef SQLITE_USE_SEH
+ if( rc==SQLITE_OK ) pWal->lockMask |= (1 << lockIdx);
+#endif
return rc;
}
static void walUnlockShared(Wal *pWal, int lockIdx){
if( pWal->exclusiveMode ) return;
(void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1,
SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED);
+#ifdef SQLITE_USE_SEH
+ pWal->lockMask &= ~(1 << lockIdx);
+#endif
WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx)));
}
static int walLockExclusive(Wal *pWal, int lockIdx, int n){
@@ -64461,12 +65600,20 @@ static int walLockExclusive(Wal *pWal, int lockIdx, int n){
WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal,
walLockName(lockIdx), n, rc ? "failed" : "ok"));
VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); )
+#ifdef SQLITE_USE_SEH
+ if( rc==SQLITE_OK ){
+ pWal->lockMask |= (((1<<n)-1) << (SQLITE_SHM_NLOCK+lockIdx));
+ }
+#endif
return rc;
}
static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){
if( pWal->exclusiveMode ) return;
(void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE);
+#ifdef SQLITE_USE_SEH
+ pWal->lockMask &= ~(((1<<n)-1) << (SQLITE_SHM_NLOCK+lockIdx));
+#endif
WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal,
walLockName(lockIdx), n));
}
@@ -64558,6 +65705,7 @@ static int walFramePage(u32 iFrame){
*/
static u32 walFramePgno(Wal *pWal, u32 iFrame){
int iHash = walFramePage(iFrame);
+ SEH_INJECT_FAULT;
if( iHash==0 ){
return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1];
}
@@ -64817,6 +65965,7 @@ static int walIndexRecover(Wal *pWal){
/* Malloc a buffer to read frames into. */
szFrame = szPage + WAL_FRAME_HDRSIZE;
aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ);
+ SEH_FREE_ON_ERROR(0, aFrame);
if( !aFrame ){
rc = SQLITE_NOMEM_BKPT;
goto recovery_error;
@@ -64835,6 +65984,7 @@ static int walIndexRecover(Wal *pWal){
rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare);
assert( aShare!=0 || rc!=SQLITE_OK );
if( aShare==0 ) break;
+ SEH_SET_ON_ERROR(iPg, aShare);
pWal->apWiData[iPg] = aPrivate;
for(iFrame=iFirst; iFrame<=iLast; iFrame++){
@@ -64862,6 +66012,7 @@ static int walIndexRecover(Wal *pWal){
}
}
pWal->apWiData[iPg] = aShare;
+ SEH_SET_ON_ERROR(0,0);
nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0);
nHdr32 = nHdr / sizeof(u32);
#ifndef SQLITE_SAFER_WALINDEX_RECOVERY
@@ -64892,9 +66043,11 @@ static int walIndexRecover(Wal *pWal){
}
}
#endif
+ SEH_INJECT_FAULT;
if( iFrame<=iLast ) break;
}
+ SEH_FREE_ON_ERROR(aFrame, 0);
sqlite3_free(aFrame);
}
@@ -64922,6 +66075,7 @@ finished:
}else{
pInfo->aReadMark[i] = READMARK_NOT_USED;
}
+ SEH_INJECT_FAULT;
walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
}else if( rc!=SQLITE_BUSY ){
goto recovery_error;
@@ -65079,7 +66233,7 @@ SQLITE_PRIVATE int sqlite3WalOpen(
}
/*
-** Change the size to which the WAL file is trucated on each reset.
+** Change the size to which the WAL file is truncated on each reset.
*/
SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){
if( pWal ) pWal->mxWalSize = iLimit;
@@ -65305,23 +66459,16 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
nByte = sizeof(WalIterator)
+ (nSegment-1)*sizeof(struct WalSegment)
+ iLast*sizeof(ht_slot);
- p = (WalIterator *)sqlite3_malloc64(nByte);
+ p = (WalIterator *)sqlite3_malloc64(nByte
+ + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
+ );
if( !p ){
return SQLITE_NOMEM_BKPT;
}
memset(p, 0, nByte);
p->nSegment = nSegment;
-
- /* Allocate temporary space used by the merge-sort routine. This block
- ** of memory will be freed before this function returns.
- */
- aTmp = (ht_slot *)sqlite3_malloc64(
- sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
- );
- if( !aTmp ){
- rc = SQLITE_NOMEM_BKPT;
- }
-
+ aTmp = (ht_slot*)&(((u8*)p)[nByte]);
+ SEH_FREE_ON_ERROR(0, p);
for(i=walFramePage(nBackfill+1); rc==SQLITE_OK && i<nSegment; i++){
WalHashLoc sLoc;
@@ -65349,9 +66496,8 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
p->aSegment[i].aPgno = (u32 *)sLoc.aPgno;
}
}
- sqlite3_free(aTmp);
-
if( rc!=SQLITE_OK ){
+ SEH_FREE_ON_ERROR(p, 0);
walIteratorFree(p);
p = 0;
}
@@ -65360,6 +66506,19 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){
}
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+
+
+/*
+** Attempt to enable blocking locks that block for nMs ms. Return 1 if
+** blocking locks are successfully enabled, or 0 otherwise.
+*/
+static int walEnableBlockingMs(Wal *pWal, int nMs){
+ int rc = sqlite3OsFileControl(
+ pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&nMs
+ );
+ return (rc==SQLITE_OK);
+}
+
/*
** Attempt to enable blocking locks. Blocking locks are enabled only if (a)
** they are supported by the VFS, and (b) the database handle is configured
@@ -65371,11 +66530,7 @@ static int walEnableBlocking(Wal *pWal){
if( pWal->db ){
int tmout = pWal->db->busyTimeout;
if( tmout ){
- int rc;
- rc = sqlite3OsFileControl(
- pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout
- );
- res = (rc==SQLITE_OK);
+ res = walEnableBlockingMs(pWal, tmout);
}
}
return res;
@@ -65424,20 +66579,10 @@ SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db){
pWal->db = db;
}
-/*
-** Take an exclusive WRITE lock. Blocking if so configured.
-*/
-static int walLockWriter(Wal *pWal){
- int rc;
- walEnableBlocking(pWal);
- rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1);
- walDisableBlocking(pWal);
- return rc;
-}
#else
# define walEnableBlocking(x) 0
# define walDisableBlocking(x)
-# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1)
+# define walEnableBlockingMs(pWal, ms) 0
# define sqlite3WalDb(pWal, db)
#endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */
@@ -65577,13 +66722,13 @@ static int walCheckpoint(
mxSafeFrame = pWal->hdr.mxFrame;
mxPage = pWal->hdr.nPage;
for(i=1; i<WAL_NREADER; i++){
- u32 y = AtomicLoad(pInfo->aReadMark+i);
+ u32 y = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT;
if( mxSafeFrame>y ){
assert( y<=pWal->hdr.mxFrame );
rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1);
if( rc==SQLITE_OK ){
u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED);
- AtomicStore(pInfo->aReadMark+i, iMark);
+ AtomicStore(pInfo->aReadMark+i, iMark); SEH_INJECT_FAULT;
walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
}else if( rc==SQLITE_BUSY ){
mxSafeFrame = y;
@@ -65604,8 +66749,7 @@ static int walCheckpoint(
&& (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK
){
u32 nBackfill = pInfo->nBackfill;
-
- pInfo->nBackfillAttempted = mxSafeFrame;
+ pInfo->nBackfillAttempted = mxSafeFrame; SEH_INJECT_FAULT;
/* Sync the WAL to disk */
rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags));
@@ -65636,6 +66780,7 @@ static int walCheckpoint(
while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
i64 iOffset;
assert( walFramePgno(pWal, iFrame)==iDbpage );
+ SEH_INJECT_FAULT;
if( AtomicLoad(&db->u1.isInterrupted) ){
rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT;
break;
@@ -65665,7 +66810,7 @@ static int walCheckpoint(
}
}
if( rc==SQLITE_OK ){
- AtomicStore(&pInfo->nBackfill, mxSafeFrame);
+ AtomicStore(&pInfo->nBackfill, mxSafeFrame); SEH_INJECT_FAULT;
}
}
@@ -65687,6 +66832,7 @@ static int walCheckpoint(
*/
if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){
assert( pWal->writeLock );
+ SEH_INJECT_FAULT;
if( pInfo->nBackfill<pWal->hdr.mxFrame ){
rc = SQLITE_BUSY;
}else if( eMode>=SQLITE_CHECKPOINT_RESTART ){
@@ -65718,6 +66864,7 @@ static int walCheckpoint(
}
walcheckpoint_out:
+ SEH_FREE_ON_ERROR(pIter, 0);
walIteratorFree(pIter);
return rc;
}
@@ -65740,6 +66887,93 @@ static void walLimitSize(Wal *pWal, i64 nMax){
}
}
+#ifdef SQLITE_USE_SEH
+/*
+** This is the "standard" exception handler used in a few places to handle
+** an exception thrown by reading from the *-shm mapping after it has become
+** invalid in SQLITE_USE_SEH builds. It is used as follows:
+**
+** SEH_TRY { ... }
+** SEH_EXCEPT( rc = walHandleException(pWal); )
+**
+** This function does three things:
+**
+** 1) Determines the locks that should be held, based on the contents of
+** the Wal.readLock, Wal.writeLock and Wal.ckptLock variables. All other
+** held locks are assumed to be transient locks that would have been
+** released had the exception not been thrown and are dropped.
+**
+** 2) Frees the pointer at Wal.pFree, if any, using sqlite3_free().
+**
+** 3) Set pWal->apWiData[pWal->iWiPg] to pWal->pWiValue if not NULL
+**
+** 4) Returns SQLITE_IOERR.
+*/
+static int walHandleException(Wal *pWal){
+ if( pWal->exclusiveMode==0 ){
+ static const int S = 1;
+ static const int E = (1<<SQLITE_SHM_NLOCK);
+ int ii;
+ u32 mUnlock = pWal->lockMask & ~(
+ (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
+ | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
+ | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
+ );
+ for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+ if( (S<<ii) & mUnlock ) walUnlockShared(pWal, ii);
+ if( (E<<ii) & mUnlock ) walUnlockExclusive(pWal, ii, 1);
+ }
+ }
+ sqlite3_free(pWal->pFree);
+ pWal->pFree = 0;
+ if( pWal->pWiValue ){
+ pWal->apWiData[pWal->iWiPg] = pWal->pWiValue;
+ pWal->pWiValue = 0;
+ }
+ return SQLITE_IOERR_IN_PAGE;
+}
+
+/*
+** Assert that the Wal.lockMask mask, which indicates the locks held
+** by the connenction, is consistent with the Wal.readLock, Wal.writeLock
+** and Wal.ckptLock variables. To be used as:
+**
+** assert( walAssertLockmask(pWal) );
+*/
+static int walAssertLockmask(Wal *pWal){
+ if( pWal->exclusiveMode==0 ){
+ static const int S = 1;
+ static const int E = (1<<SQLITE_SHM_NLOCK);
+ u32 mExpect = (
+ (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
+ | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
+ | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
+#ifdef SQLITE_ENABLE_SNAPSHOT
+ | (pWal->pSnapshot ? (pWal->lockMask & (1 << WAL_CKPT_LOCK)) : 0)
+#endif
+ );
+ assert( mExpect==pWal->lockMask );
+ }
+ return 1;
+}
+
+/*
+** Return and zero the "system error" field set when an
+** EXCEPTION_IN_PAGE_ERROR exception is caught.
+*/
+SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal *pWal){
+ int iRet = 0;
+ if( pWal ){
+ iRet = pWal->iSysErrno;
+ pWal->iSysErrno = 0;
+ }
+ return iRet;
+}
+
+#else
+# define walAssertLockmask(x) 1
+#endif /* ifdef SQLITE_USE_SEH */
+
/*
** Close a connection to a log file.
*/
@@ -65754,6 +66988,8 @@ SQLITE_PRIVATE int sqlite3WalClose(
if( pWal ){
int isDelete = 0; /* True to unlink wal and wal-index files */
+ assert( walAssertLockmask(pWal) );
+
/* If an EXCLUSIVE lock can be obtained on the database file (using the
** ordinary, rollback-mode locking methods, this guarantees that the
** connection associated with this log file is the only connection to
@@ -65778,7 +67014,7 @@ SQLITE_PRIVATE int sqlite3WalClose(
);
if( bPersist!=1 ){
/* Try to delete the WAL file if the checkpoint completed and
- ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal
+ ** fsynced (rc==SQLITE_OK) and if we are not in persistent-wal
** mode (!bPersist) */
isDelete = 1;
}else if( pWal->mxWalSize>=0 ){
@@ -65845,7 +67081,7 @@ static SQLITE_NO_TSAN int walIndexTryHdr(Wal *pWal, int *pChanged){
** give false-positive warnings about these accesses because the tools do not
** account for the double-read and the memory barrier. The use of mutexes
** here would be problematic as the memory being accessed is potentially
- ** shared among multiple processes and not all mutex implementions work
+ ** shared among multiple processes and not all mutex implementations work
** reliably in that environment.
*/
aHdr = walIndexHdr(pWal);
@@ -65947,7 +67183,9 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){
}
}else{
int bWriteLock = pWal->writeLock;
- if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){
+ if( bWriteLock
+ || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1))
+ ){
pWal->writeLock = 1;
if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){
badHdr = walIndexTryHdr(pWal, pChanged);
@@ -65955,7 +67193,8 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){
/* If the wal-index header is still malformed even while holding
** a WRITE lock, it can only mean that the header is corrupted and
** needs to be reconstructed. So run recovery to do exactly that.
- */
+ ** Disable blocking locks first. */
+ walDisableBlocking(pWal);
rc = walIndexRecover(pWal);
*pChanged = 1;
}
@@ -66166,6 +67405,37 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){
}
/*
+** The final argument passed to walTryBeginRead() is of type (int*). The
+** caller should invoke walTryBeginRead as follows:
+**
+** int cnt = 0;
+** do {
+** rc = walTryBeginRead(..., &cnt);
+** }while( rc==WAL_RETRY );
+**
+** The final value of "cnt" is of no use to the caller. It is used by
+** the implementation of walTryBeginRead() as follows:
+**
+** + Each time walTryBeginRead() is called, it is incremented. Once
+** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead()
+** has many times been invoked and failed with WAL_RETRY - walTryBeginRead()
+** returns SQLITE_PROTOCOL.
+**
+** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed
+** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS
+** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case
+** the next invocation of walTryBeginRead() may omit an expected call to
+** sqlite3OsSleep(). There has already been a delay when the previous call
+** waited on a lock.
+*/
+#define WAL_RETRY_PROTOCOL_LIMIT 100
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+# define WAL_RETRY_BLOCKED_MASK 0x10000000
+#else
+# define WAL_RETRY_BLOCKED_MASK 0
+#endif
+
+/*
** Attempt to start a read transaction. This might fail due to a race or
** other transient condition. When that happens, it returns WAL_RETRY to
** indicate to the caller that it is safe to retry immediately.
@@ -66215,13 +67485,16 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){
** so it takes care to hold an exclusive lock on the corresponding
** WAL_READ_LOCK() while changing values.
*/
-static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
+static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){
volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */
u32 mxReadMark; /* Largest aReadMark[] value */
int mxI; /* Index of largest aReadMark[] value */
int i; /* Loop counter */
int rc = SQLITE_OK; /* Return code */
u32 mxFrame; /* Wal frame to lock to */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ int nBlockTmout = 0;
+#endif
assert( pWal->readLock<0 ); /* Not currently locked */
@@ -66245,14 +67518,34 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** so that on the 100th (and last) RETRY we delay for 323 milliseconds.
** The total delay time before giving up is less than 10 seconds.
*/
- if( cnt>5 ){
+ (*pCnt)++;
+ if( *pCnt>5 ){
int nDelay = 1; /* Pause time in microseconds */
- if( cnt>100 ){
+ int cnt = (*pCnt & ~WAL_RETRY_BLOCKED_MASK);
+ if( cnt>WAL_RETRY_PROTOCOL_LIMIT ){
VVA_ONLY( pWal->lockError = 1; )
return SQLITE_PROTOCOL;
}
- if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+ if( *pCnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ /* In SQLITE_ENABLE_SETLK_TIMEOUT builds, configure the file-descriptor
+ ** to block for locks for approximately nDelay us. This affects three
+ ** locks: (a) the shared lock taken on the DMS slot in os_unix.c (if
+ ** using os_unix.c), (b) the WRITER lock taken in walIndexReadHdr() if the
+ ** first attempted read fails, and (c) the shared lock taken on the
+ ** read-mark.
+ **
+ ** If the previous call failed due to an SQLITE_BUSY_TIMEOUT error,
+ ** then sleep for the minimum of 1us. The previous call already provided
+ ** an extra delay while it was blocking on the lock.
+ */
+ nBlockTmout = (nDelay+998) / 1000;
+ if( !useWal && walEnableBlockingMs(pWal, nBlockTmout) ){
+ if( *pCnt & WAL_RETRY_BLOCKED_MASK ) nDelay = 1;
+ }
+#endif
sqlite3OsSleep(pWal->pVfs, nDelay);
+ *pCnt &= ~WAL_RETRY_BLOCKED_MASK;
}
if( !useWal ){
@@ -66260,6 +67553,13 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
if( pWal->bShmUnreliable==0 ){
rc = walIndexReadHdr(pWal, pChanged);
}
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ walDisableBlocking(pWal);
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ rc = SQLITE_BUSY;
+ *pCnt |= WAL_RETRY_BLOCKED_MASK;
+ }
+#endif
if( rc==SQLITE_BUSY ){
/* If there is not a recovery running in another thread or process
** then convert BUSY errors to WAL_RETRY. If recovery is known to
@@ -66296,6 +67596,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
assert( pWal->nWiData>0 );
assert( pWal->apWiData[0]!=0 );
pInfo = walCkptInfo(pWal);
+ SEH_INJECT_FAULT;
if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame
#ifdef SQLITE_ENABLE_SNAPSHOT
&& (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0)
@@ -66345,7 +67646,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
}
#endif
for(i=1; i<WAL_NREADER; i++){
- u32 thisMark = AtomicLoad(pInfo->aReadMark+i);
+ u32 thisMark = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT;
if( mxReadMark<=thisMark && thisMark<=mxFrame ){
assert( thisMark!=READMARK_NOT_USED );
mxReadMark = thisMark;
@@ -66373,9 +67674,19 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT;
}
+ (void)walEnableBlockingMs(pWal, nBlockTmout);
rc = walLockShared(pWal, WAL_READ_LOCK(mxI));
+ walDisableBlocking(pWal);
if( rc ){
- return rc==SQLITE_BUSY ? WAL_RETRY : rc;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+ if( rc==SQLITE_BUSY_TIMEOUT ){
+ *pCnt |= WAL_RETRY_BLOCKED_MASK;
+ }
+#else
+ assert( rc!=SQLITE_BUSY_TIMEOUT );
+#endif
+ assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT );
+ return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc;
}
/* Now that the read-lock has been obtained, check that neither the
** value in the aReadMark[] array or the contents of the wal-index
@@ -66411,7 +67722,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** we can guarantee that the checkpointer that set nBackfill could not
** see any pages past pWal->hdr.mxFrame, this problem does not come up.
*/
- pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1;
+ pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT;
walShmBarrier(pWal);
if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark
|| memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))
@@ -66427,6 +67738,54 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
#ifdef SQLITE_ENABLE_SNAPSHOT
/*
+** This function does the work of sqlite3WalSnapshotRecover().
+*/
+static int walSnapshotRecover(
+ Wal *pWal, /* WAL handle */
+ void *pBuf1, /* Temp buffer pWal->szPage bytes in size */
+ void *pBuf2 /* Temp buffer pWal->szPage bytes in size */
+){
+ int szPage = (int)pWal->szPage;
+ int rc;
+ i64 szDb; /* Size of db file in bytes */
+
+ rc = sqlite3OsFileSize(pWal->pDbFd, &szDb);
+ if( rc==SQLITE_OK ){
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+ u32 i = pInfo->nBackfillAttempted;
+ for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){
+ WalHashLoc sLoc; /* Hash table location */
+ u32 pgno; /* Page number in db file */
+ i64 iDbOff; /* Offset of db file entry */
+ i64 iWalOff; /* Offset of wal file entry */
+
+ rc = walHashGet(pWal, walFramePage(i), &sLoc);
+ if( rc!=SQLITE_OK ) break;
+ assert( i - sLoc.iZero - 1 >=0 );
+ pgno = sLoc.aPgno[i-sLoc.iZero-1];
+ iDbOff = (i64)(pgno-1) * szPage;
+
+ if( iDbOff+szPage<=szDb ){
+ iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE;
+ rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff);
+ }
+
+ if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){
+ break;
+ }
+ }
+
+ pInfo->nBackfillAttempted = i-1;
+ }
+ }
+
+ return rc;
+}
+
+/*
** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted
** variable so that older snapshots can be accessed. To do this, loop
** through all wal frames from nBackfillAttempted to (nBackfill+1),
@@ -66451,50 +67810,21 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
assert( pWal->readLock>=0 );
rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
if( rc==SQLITE_OK ){
- volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
- int szPage = (int)pWal->szPage;
- i64 szDb; /* Size of db file in bytes */
-
- rc = sqlite3OsFileSize(pWal->pDbFd, &szDb);
- if( rc==SQLITE_OK ){
- void *pBuf1 = sqlite3_malloc(szPage);
- void *pBuf2 = sqlite3_malloc(szPage);
- if( pBuf1==0 || pBuf2==0 ){
- rc = SQLITE_NOMEM;
- }else{
- u32 i = pInfo->nBackfillAttempted;
- for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){
- WalHashLoc sLoc; /* Hash table location */
- u32 pgno; /* Page number in db file */
- i64 iDbOff; /* Offset of db file entry */
- i64 iWalOff; /* Offset of wal file entry */
-
- rc = walHashGet(pWal, walFramePage(i), &sLoc);
- if( rc!=SQLITE_OK ) break;
- assert( i - sLoc.iZero - 1 >=0 );
- pgno = sLoc.aPgno[i-sLoc.iZero-1];
- iDbOff = (i64)(pgno-1) * szPage;
-
- if( iDbOff+szPage<=szDb ){
- iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE;
- rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff);
-
- if( rc==SQLITE_OK ){
- rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff);
- }
-
- if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){
- break;
- }
- }
-
- pInfo->nBackfillAttempted = i-1;
- }
+ void *pBuf1 = sqlite3_malloc(pWal->szPage);
+ void *pBuf2 = sqlite3_malloc(pWal->szPage);
+ if( pBuf1==0 || pBuf2==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pWal->ckptLock = 1;
+ SEH_TRY {
+ rc = walSnapshotRecover(pWal, pBuf1, pBuf2);
}
-
- sqlite3_free(pBuf1);
- sqlite3_free(pBuf2);
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+ pWal->ckptLock = 0;
}
+
+ sqlite3_free(pBuf1);
+ sqlite3_free(pBuf2);
walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
}
@@ -66503,28 +67833,20 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
#endif /* SQLITE_ENABLE_SNAPSHOT */
/*
-** Begin a read transaction on the database.
-**
-** This routine used to be called sqlite3OpenSnapshot() and with good reason:
-** it takes a snapshot of the state of the WAL and wal-index for the current
-** instant in time. The current thread will continue to use this snapshot.
-** Other threads might append new content to the WAL and wal-index but
-** that extra content is ignored by the current thread.
-**
-** If the database contents have changes since the previous read
-** transaction, then *pChanged is set to 1 before returning. The
-** Pager layer will use this to know that its cache is stale and
-** needs to be flushed.
+** This function does the work of sqlite3WalBeginReadTransaction() (see
+** below). That function simply calls this one inside an SEH_TRY{...} block.
*/
-SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
+static int walBeginReadTransaction(Wal *pWal, int *pChanged){
int rc; /* Return code */
int cnt = 0; /* Number of TryBeginRead attempts */
#ifdef SQLITE_ENABLE_SNAPSHOT
+ int ckptLock = 0;
int bChanged = 0;
WalIndexHdr *pSnapshot = pWal->pSnapshot;
#endif
assert( pWal->ckptLock==0 );
+ assert( pWal->nSehTry>0 );
#ifdef SQLITE_ENABLE_SNAPSHOT
if( pSnapshot ){
@@ -66547,12 +67869,12 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
if( rc!=SQLITE_OK ){
return rc;
}
- pWal->ckptLock = 1;
+ ckptLock = 1;
}
#endif
do{
- rc = walTryBeginRead(pWal, pChanged, 0, ++cnt);
+ rc = walTryBeginRead(pWal, pChanged, 0, &cnt);
}while( rc==WAL_RETRY );
testcase( (rc&0xff)==SQLITE_BUSY );
testcase( (rc&0xff)==SQLITE_IOERR );
@@ -66611,16 +67933,38 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
}
/* Release the shared CKPT lock obtained above. */
- if( pWal->ckptLock ){
+ if( ckptLock ){
assert( pSnapshot );
walUnlockShared(pWal, WAL_CKPT_LOCK);
- pWal->ckptLock = 0;
}
#endif
return rc;
}
/*
+** Begin a read transaction on the database.
+**
+** This routine used to be called sqlite3OpenSnapshot() and with good reason:
+** it takes a snapshot of the state of the WAL and wal-index for the current
+** instant in time. The current thread will continue to use this snapshot.
+** Other threads might append new content to the WAL and wal-index but
+** that extra content is ignored by the current thread.
+**
+** If the database contents have changes since the previous read
+** transaction, then *pChanged is set to 1 before returning. The
+** Pager layer will use this to know that its cache is stale and
+** needs to be flushed.
+*/
+SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
+ int rc;
+ SEH_TRY {
+ rc = walBeginReadTransaction(pWal, pChanged);
+ }
+ SEH_EXCEPT( rc = walHandleException(pWal); )
+ return rc;
+}
+
+/*
** Finish with a read transaction. All this does is release the
** read-lock.
*/
@@ -66640,7 +67984,7 @@ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){
** Return SQLITE_OK if successful, or an error code if an error occurs. If an
** error does occur, the final value of *piRead is undefined.
*/
-SQLITE_PRIVATE int sqlite3WalFindFrame(
+static int walFindFrame(
Wal *pWal, /* WAL handle */
Pgno pgno, /* Database page number to read data for */
u32 *piRead /* OUT: Frame number (or zero) */
@@ -66703,6 +68047,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
}
nCollide = HASHTABLE_NSLOT;
iKey = walHash(pgno);
+ SEH_INJECT_FAULT;
while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){
u32 iFrame = iH + sLoc.iZero;
if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){
@@ -66710,6 +68055,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
iRead = iFrame;
}
if( (nCollide--)==0 ){
+ *piRead = 0;
return SQLITE_CORRUPT_BKPT;
}
iKey = walNextHash(iKey);
@@ -66740,6 +68086,30 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
}
/*
+** Search the wal file for page pgno. If found, set *piRead to the frame that
+** contains the page. Otherwise, if pgno is not in the wal file, set *piRead
+** to zero.
+**
+** Return SQLITE_OK if successful, or an error code if an error occurs. If an
+** error does occur, the final value of *piRead is undefined.
+**
+** The difference between this function and walFindFrame() is that this
+** function wraps walFindFrame() in an SEH_TRY{...} block.
+*/
+SQLITE_PRIVATE int sqlite3WalFindFrame(
+ Wal *pWal, /* WAL handle */
+ Pgno pgno, /* Database page number to read data for */
+ u32 *piRead /* OUT: Frame number (or zero) */
+){
+ int rc;
+ SEH_TRY {
+ rc = walFindFrame(pWal, pgno, piRead);
+ }
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+ return rc;
+}
+
+/*
** Read the contents of frame iRead from the wal file into buffer pOut
** (which is nOut bytes in size). Return SQLITE_OK if successful, or an
** error code otherwise.
@@ -66820,12 +68190,17 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){
** time the read transaction on this connection was started, then
** the write is disallowed.
*/
- if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){
+ SEH_TRY {
+ if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){
+ rc = SQLITE_BUSY_SNAPSHOT;
+ }
+ }
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+
+ if( rc!=SQLITE_OK ){
walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1);
pWal->writeLock = 0;
- rc = SQLITE_BUSY_SNAPSHOT;
}
-
return rc;
}
@@ -66861,30 +68236,33 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p
Pgno iMax = pWal->hdr.mxFrame;
Pgno iFrame;
- /* Restore the clients cache of the wal-index header to the state it
- ** was in before the client began writing to the database.
- */
- memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr));
-
- for(iFrame=pWal->hdr.mxFrame+1;
- ALWAYS(rc==SQLITE_OK) && iFrame<=iMax;
- iFrame++
- ){
- /* This call cannot fail. Unless the page for which the page number
- ** is passed as the second argument is (a) in the cache and
- ** (b) has an outstanding reference, then xUndo is either a no-op
- ** (if (a) is false) or simply expels the page from the cache (if (b)
- ** is false).
- **
- ** If the upper layer is doing a rollback, it is guaranteed that there
- ** are no outstanding references to any page other than page 1. And
- ** page 1 is never written to the log until the transaction is
- ** committed. As a result, the call to xUndo may not fail.
+ SEH_TRY {
+ /* Restore the clients cache of the wal-index header to the state it
+ ** was in before the client began writing to the database.
*/
- assert( walFramePgno(pWal, iFrame)!=1 );
- rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame));
+ memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr));
+
+ for(iFrame=pWal->hdr.mxFrame+1;
+ ALWAYS(rc==SQLITE_OK) && iFrame<=iMax;
+ iFrame++
+ ){
+ /* This call cannot fail. Unless the page for which the page number
+ ** is passed as the second argument is (a) in the cache and
+ ** (b) has an outstanding reference, then xUndo is either a no-op
+ ** (if (a) is false) or simply expels the page from the cache (if (b)
+ ** is false).
+ **
+ ** If the upper layer is doing a rollback, it is guaranteed that there
+ ** are no outstanding references to any page other than page 1. And
+ ** page 1 is never written to the log until the transaction is
+ ** committed. As a result, the call to xUndo may not fail.
+ */
+ assert( walFramePgno(pWal, iFrame)!=1 );
+ rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame));
+ }
+ if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
}
- if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
}
return rc;
}
@@ -66928,7 +68306,10 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
pWal->hdr.mxFrame = aWalData[0];
pWal->hdr.aFrameCksum[0] = aWalData[1];
pWal->hdr.aFrameCksum[1] = aWalData[2];
- walCleanupHash(pWal);
+ SEH_TRY {
+ walCleanupHash(pWal);
+ }
+ SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
}
return rc;
@@ -66978,7 +68359,7 @@ static int walRestartLog(Wal *pWal){
cnt = 0;
do{
int notUsed;
- rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+ rc = walTryBeginRead(pWal, &notUsed, 1, &cnt);
}while( rc==WAL_RETRY );
assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */
testcase( (rc&0xff)==SQLITE_IOERR );
@@ -67109,7 +68490,7 @@ static int walRewriteChecksums(Wal *pWal, u32 iLast){
** Write a set of frames to the log. The caller must hold the write-lock
** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
*/
-SQLITE_PRIVATE int sqlite3WalFrames(
+static int walFrames(
Wal *pWal, /* Wal handle to write to */
int szPage, /* Database page-size in bytes */
PgHdr *pList, /* List of dirty pages to write */
@@ -67220,7 +68601,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(
** checksums must be recomputed when the transaction is committed. */
if( iFirst && (p->pDirty || isCommit==0) ){
u32 iWrite = 0;
- VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite);
+ VVA_ONLY(rc =) walFindFrame(pWal, p->pgno, &iWrite);
assert( rc==SQLITE_OK || iWrite==0 );
if( iWrite>=iFirst ){
i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE;
@@ -67340,6 +68721,29 @@ SQLITE_PRIVATE int sqlite3WalFrames(
}
/*
+** Write a set of frames to the log. The caller must hold the write-lock
+** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
+**
+** The difference between this function and walFrames() is that this
+** function wraps walFrames() in an SEH_TRY{...} block.
+*/
+SQLITE_PRIVATE int sqlite3WalFrames(
+ Wal *pWal, /* Wal handle to write to */
+ int szPage, /* Database page-size in bytes */
+ PgHdr *pList, /* List of dirty pages to write */
+ Pgno nTruncate, /* Database size after this commit */
+ int isCommit, /* True if this is a commit */
+ int sync_flags /* Flags to pass to OsSync() (or 0) */
+){
+ int rc;
+ SEH_TRY {
+ rc = walFrames(pWal, szPage, pList, nTruncate, isCommit, sync_flags);
+ }
+ SEH_EXCEPT( rc = walHandleException(pWal); )
+ return rc;
+}
+
+/*
** This routine is called to implement sqlite3_wal_checkpoint() and
** related interfaces.
**
@@ -67376,10 +68780,9 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
if( pWal->readOnly ) return SQLITE_READONLY;
WALTRACE(("WAL%p: checkpoint begins\n", pWal));
- /* Enable blocking locks, if possible. If blocking locks are successfully
- ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */
+ /* Enable blocking locks, if possible. */
sqlite3WalDb(pWal, db);
- (void)walEnableBlocking(pWal);
+ if( xBusy2 ) (void)walEnableBlocking(pWal);
/* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
** "checkpoint" lock on the database file.
@@ -67418,30 +68821,38 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
/* Read the wal-index header. */
- if( rc==SQLITE_OK ){
- walDisableBlocking(pWal);
- rc = walIndexReadHdr(pWal, &isChanged);
- (void)walEnableBlocking(pWal);
- if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
- sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
+ SEH_TRY {
+ if( rc==SQLITE_OK ){
+ /* For a passive checkpoint, do not re-enable blocking locks after
+ ** reading the wal-index header. A passive checkpoint should not block
+ ** or invoke the busy handler. The only lock such a checkpoint may
+ ** attempt to obtain is a lock on a read-slot, and it should give up
+ ** immediately and do a partial checkpoint if it cannot obtain it. */
+ walDisableBlocking(pWal);
+ rc = walIndexReadHdr(pWal, &isChanged);
+ if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal);
+ if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
+ sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
+ }
}
- }
-
- /* Copy data from the log to the database file. */
- if( rc==SQLITE_OK ){
- if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
- rc = SQLITE_CORRUPT_BKPT;
- }else{
- rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf);
- }
+ /* Copy data from the log to the database file. */
+ if( rc==SQLITE_OK ){
+ if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
+ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+ rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags,zBuf);
+ }
- /* If no error occurred, set the output variables. */
- if( rc==SQLITE_OK || rc==SQLITE_BUSY ){
- if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame;
- if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill);
+ /* If no error occurred, set the output variables. */
+ if( rc==SQLITE_OK || rc==SQLITE_BUSY ){
+ if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame;
+ SEH_INJECT_FAULT;
+ if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill);
+ }
}
}
+ SEH_EXCEPT( rc = walHandleException(pWal); )
if( isChanged ){
/* If a new wal-index header was loaded before the checkpoint was
@@ -67518,7 +68929,9 @@ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){
** locks are taken in this case). Nor should the pager attempt to
** upgrade to exclusive-mode following such an error.
*/
+#ifndef SQLITE_USE_SEH
assert( pWal->readLock>=0 || pWal->lockError );
+#endif
assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) );
if( op==0 ){
@@ -67619,16 +69032,19 @@ SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
*/
SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){
int rc;
- rc = walLockShared(pWal, WAL_CKPT_LOCK);
- if( rc==SQLITE_OK ){
- WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot;
- if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
- || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted
- ){
- rc = SQLITE_ERROR_SNAPSHOT;
- walUnlockShared(pWal, WAL_CKPT_LOCK);
+ SEH_TRY {
+ rc = walLockShared(pWal, WAL_CKPT_LOCK);
+ if( rc==SQLITE_OK ){
+ WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot;
+ if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
+ || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted
+ ){
+ rc = SQLITE_ERROR_SNAPSHOT;
+ walUnlockShared(pWal, WAL_CKPT_LOCK);
+ }
}
}
+ SEH_EXCEPT( rc = walHandleException(pWal); )
return rc;
}
@@ -67751,7 +69167,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
** 22 1 Min embedded payload fraction (must be 32)
** 23 1 Min leaf payload fraction (must be 32)
** 24 4 File change counter
-** 28 4 Reserved for future use
+** 28 4 The size of the database in pages
** 32 4 First freelist page
** 36 4 Number of freelist pages in the file
** 40 60 15 4-byte meta values passed to higher layers
@@ -67867,7 +69283,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
** 0x81 0x00 becomes 0x00000080
** 0x82 0x00 becomes 0x00000100
** 0x80 0x7f becomes 0x0000007f
-** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678
+** 0x81 0x91 0xd1 0xac 0x78 becomes 0x12345678
** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081
**
** Variable length integers are used for rowids and to hold the number of
@@ -67950,7 +69366,7 @@ typedef struct CellInfo CellInfo;
** page that has been loaded into memory. The information in this object
** is derived from the raw on-disk page content.
**
-** As each database page is loaded into memory, the pager allocats an
+** As each database page is loaded into memory, the pager allocates an
** instance of this object and zeros the first 8 bytes. (This is the
** "extra" information associated with each page of the pager.)
**
@@ -68382,7 +69798,7 @@ struct IntegrityCk {
BtShared *pBt; /* The tree being checked out */
Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */
u8 *aPgRef; /* 1 bit per page in the db (see above) */
- Pgno nPage; /* Number of pages in the database */
+ Pgno nCkPage; /* Pages in the database. 0 for partial check */
int mxErr; /* Stop accumulating errors when this reaches zero */
int nErr; /* Number of messages written to zErrMsg so far */
int rc; /* SQLITE_OK, SQLITE_NOMEM, or SQLITE_INTERRUPT */
@@ -68406,7 +69822,7 @@ struct IntegrityCk {
/*
** get2byteAligned(), unlike get2byte(), requires that its argument point to a
-** two-byte aligned address. get2bytea() is only used for accessing the
+** two-byte aligned address. get2byteAligned() is only used for accessing the
** cell addresses in a btree header.
*/
#if SQLITE_BYTEORDER==4321
@@ -68583,7 +69999,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
**
** There is a corresponding leave-all procedures.
**
-** Enter the mutexes in accending order by BtShared pointer address
+** Enter the mutexes in ascending order by BtShared pointer address
** to avoid the possibility of deadlock when two threads with
** two or more btrees in common both try to lock all their btrees
** at the same instant.
@@ -70250,7 +71666,7 @@ static void ptrmapPutOvflPtr(MemPage *pPage, MemPage *pSrc, u8 *pCell,int *pRC){
pPage->xParseCell(pPage, pCell, &info);
if( info.nLocal<info.nPayload ){
Pgno ovfl;
- if( SQLITE_WITHIN(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){
+ if( SQLITE_OVERFLOW(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){
testcase( pSrc!=pPage );
*pRC = SQLITE_CORRUPT_BKPT;
return;
@@ -70351,7 +71767,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){
iCellStart = get2byte(&data[hdr+5]);
if( nCell>0 ){
temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
- memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart);
+ memcpy(temp, data, usableSize);
src = temp;
for(i=0; i<nCell; i++){
u8 *pAddr; /* The i-th cell pointer */
@@ -70575,7 +71991,7 @@ static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
**
** Even though the freeblock list was checked by btreeComputeFreeSpace(),
** that routine will not detect overlap between cells or freeblocks. Nor
-** does it detect cells or freeblocks that encrouch into the reserved bytes
+** does it detect cells or freeblocks that encroach into the reserved bytes
** at the end of the page. So do additional corruption checks inside this
** routine and return SQLITE_CORRUPT if any problems are found.
*/
@@ -71034,68 +72450,41 @@ SQLITE_PRIVATE Pgno sqlite3BtreeLastPage(Btree *p){
/*
** Get a page from the pager and initialize it.
-**
-** If pCur!=0 then the page is being fetched as part of a moveToChild()
-** call. Do additional sanity checking on the page in this case.
-** And if the fetch fails, this routine must decrement pCur->iPage.
-**
-** The page is fetched as read-write unless pCur is not NULL and is
-** a read-only cursor.
-**
-** If an error occurs, then *ppPage is undefined. It
-** may remain unchanged, or it may be set to an invalid value.
*/
static int getAndInitPage(
BtShared *pBt, /* The database file */
Pgno pgno, /* Number of the page to get */
MemPage **ppPage, /* Write the page pointer here */
- BtCursor *pCur, /* Cursor to receive the page, or NULL */
int bReadOnly /* True for a read-only page */
){
int rc;
DbPage *pDbPage;
+ MemPage *pPage;
assert( sqlite3_mutex_held(pBt->mutex) );
- assert( pCur==0 || ppPage==&pCur->pPage );
- assert( pCur==0 || bReadOnly==pCur->curPagerFlags );
- assert( pCur==0 || pCur->iPage>0 );
if( pgno>btreePagecount(pBt) ){
- rc = SQLITE_CORRUPT_BKPT;
- goto getAndInitPage_error1;
+ *ppPage = 0;
+ return SQLITE_CORRUPT_BKPT;
}
rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly);
if( rc ){
- goto getAndInitPage_error1;
+ *ppPage = 0;
+ return rc;
}
- *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
- if( (*ppPage)->isInit==0 ){
+ pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+ if( pPage->isInit==0 ){
btreePageFromDbPage(pDbPage, pgno, pBt);
- rc = btreeInitPage(*ppPage);
+ rc = btreeInitPage(pPage);
if( rc!=SQLITE_OK ){
- goto getAndInitPage_error2;
+ releasePage(pPage);
+ *ppPage = 0;
+ return rc;
}
}
- assert( (*ppPage)->pgno==pgno || CORRUPT_DB );
- assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) );
-
- /* If obtaining a child page for a cursor, we must verify that the page is
- ** compatible with the root page. */
- if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){
- rc = SQLITE_CORRUPT_PGNO(pgno);
- goto getAndInitPage_error2;
- }
+ assert( pPage->pgno==pgno || CORRUPT_DB );
+ assert( pPage->aData==sqlite3PagerGetData(pDbPage) );
+ *ppPage = pPage;
return SQLITE_OK;
-
-getAndInitPage_error2:
- releasePage(*ppPage);
-getAndInitPage_error1:
- if( pCur ){
- pCur->iPage--;
- pCur->pPage = pCur->apPage[pCur->iPage];
- }
- testcase( pgno==0 );
- assert( pgno!=0 || rc!=SQLITE_OK );
- return rc;
}
/*
@@ -71178,7 +72567,7 @@ static void pageReinit(DbPage *pData){
** call to btreeInitPage() will likely return SQLITE_CORRUPT.
** But no harm is done by this. And it is very important that
** btreeInitPage() be called on every btree page so we make
- ** the call for every page that comes in for re-initing. */
+ ** the call for every page that comes in for re-initializing. */
btreeInitPage(pPage);
}
}
@@ -71357,6 +72746,9 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
assert( sizeof(u16)==2 );
assert( sizeof(Pgno)==4 );
+ /* Suppress false-positive compiler warning from PVS-Studio */
+ memset(&zDbHeader[16], 0, 8);
+
pBt = sqlite3MallocZero( sizeof(*pBt) );
if( pBt==0 ){
rc = SQLITE_NOMEM_BKPT;
@@ -71573,7 +72965,7 @@ static SQLITE_NOINLINE int allocateTempSpace(BtShared *pBt){
** can mean that fillInCell() only initializes the first 2 or 3
** bytes of pTmpSpace, but that the first 4 bytes are copied from
** it into a database page. This is not actually a problem, but it
- ** does cause a valgrind error when the 1 or 2 bytes of unitialized
+ ** does cause a valgrind error when the 1 or 2 bytes of uninitialized
** data is passed to system call write(). So to avoid this error,
** zero the first 4 bytes of temp space here.
**
@@ -71808,7 +73200,7 @@ SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){
/*
** Return the number of bytes of space at the end of every page that
-** are intentually left unused. This is the "reserved" space that is
+** are intentionally left unused. This is the "reserved" space that is
** sometimes used by extensions.
**
** The value returned is the larger of the current reserve size and
@@ -72055,7 +73447,6 @@ static int lockBtree(BtShared *pBt){
){
goto page1_init_failed;
}
- pBt->btsFlags |= BTS_PAGESIZE_FIXED;
assert( (pageSize & 7)==0 );
/* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte
** integer at offset 20 is the number of bytes of space at the end of
@@ -72075,6 +73466,7 @@ static int lockBtree(BtShared *pBt){
releasePageOne(pPage1);
pBt->usableSize = usableSize;
pBt->pageSize = pageSize;
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
freeTempSpace(pBt);
rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize,
pageSize-usableSize);
@@ -72094,6 +73486,7 @@ static int lockBtree(BtShared *pBt){
if( usableSize<480 ){
goto page1_init_failed;
}
+ pBt->btsFlags |= BTS_PAGESIZE_FIXED;
pBt->pageSize = pageSize;
pBt->usableSize = usableSize;
#ifndef SQLITE_OMIT_AUTOVACUUM
@@ -72272,7 +73665,11 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){
** when A already has a read lock, we encourage A to give up and let B
** proceed.
*/
-SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){
+static SQLITE_NOINLINE int btreeBeginTrans(
+ Btree *p, /* The btree in which to start the transaction */
+ int wrflag, /* True to start a write transaction */
+ int *pSchemaVersion /* Put schema version number here, if not NULL */
+){
BtShared *pBt = p->pBt;
Pager *pPager = pBt->pPager;
int rc = SQLITE_OK;
@@ -72444,6 +73841,28 @@ trans_begun:
sqlite3BtreeLeave(p);
return rc;
}
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){
+ BtShared *pBt;
+ if( p->sharable
+ || p->inTrans==TRANS_NONE
+ || (p->inTrans==TRANS_READ && wrflag!=0)
+ ){
+ return btreeBeginTrans(p,wrflag,pSchemaVersion);
+ }
+ pBt = p->pBt;
+ if( pSchemaVersion ){
+ *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]);
+ }
+ if( wrflag ){
+ /* This call makes sure that the pager has the correct number of
+ ** open savepoints. If the second parameter is greater than 0 and
+ ** the sub-journal is not already open, then it will be opened here.
+ */
+ return sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint);
+ }else{
+ return SQLITE_OK;
+ }
+}
#ifndef SQLITE_OMIT_AUTOVACUUM
@@ -73539,7 +74958,6 @@ SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor *pCur){
pCur->curFlags &= ~BTCF_Pinned;
}
-#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
/*
** Return the offset into the database file for the start of the
** payload to which the cursor is pointing.
@@ -73551,7 +74969,6 @@ SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor *pCur){
return (i64)pCur->pBt->pageSize*((i64)pCur->pPage->pgno - 1) +
(i64)(pCur->info.pPayload - pCur->pPage->aData);
}
-#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */
/*
** Return the number of bytes of payload for the entry that pCur is
@@ -73577,7 +74994,7 @@ SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){
** routine always returns 2147483647 (which is the largest record
** that SQLite can handle) or more. But returning a smaller value might
** prevent large memory allocations when trying to interpret a
-** corrupt datrabase.
+** corrupt database.
**
** The current implementation merely returns the size of the underlying
** database file.
@@ -73877,7 +75294,6 @@ static int accessPayload(
assert( aWrite>=pBufStart ); /* due to (6) */
memcpy(aSave, aWrite, 4);
rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
- if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT;
nextPage = get4byte(aWrite);
memcpy(aWrite, aSave, 4);
}else
@@ -74039,6 +75455,7 @@ SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){
** vice-versa).
*/
static int moveToChild(BtCursor *pCur, u32 newPgno){
+ int rc;
assert( cursorOwnsBtShared(pCur) );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
@@ -74052,8 +75469,18 @@ static int moveToChild(BtCursor *pCur, u32 newPgno){
pCur->apPage[pCur->iPage] = pCur->pPage;
pCur->ix = 0;
pCur->iPage++;
- return getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur,
- pCur->curPagerFlags);
+ rc = getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur->curPagerFlags);
+ assert( pCur->pPage!=0 || rc!=SQLITE_OK );
+ if( rc==SQLITE_OK
+ && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey)
+ ){
+ releasePage(pCur->pPage);
+ rc = SQLITE_CORRUPT_PGNO(newPgno);
+ }
+ if( rc ){
+ pCur->pPage = pCur->apPage[--pCur->iPage];
+ }
+ return rc;
}
#ifdef SQLITE_DEBUG
@@ -74160,7 +75587,7 @@ static int moveToRoot(BtCursor *pCur){
sqlite3BtreeClearCursor(pCur);
}
rc = getAndInitPage(pCur->pBt, pCur->pgnoRoot, &pCur->pPage,
- 0, pCur->curPagerFlags);
+ pCur->curPagerFlags);
if( rc!=SQLITE_OK ){
pCur->eState = CURSOR_INVALID;
return rc;
@@ -74272,7 +75699,7 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
*pRes = 0;
rc = moveToLeftmost(pCur);
}else if( rc==SQLITE_EMPTY ){
- assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
+ assert( pCur->pgnoRoot==0 || (pCur->pPage!=0 && pCur->pPage->nCell==0) );
*pRes = 1;
rc = SQLITE_OK;
}
@@ -74377,7 +75804,7 @@ SQLITE_PRIVATE int sqlite3BtreeTableMoveto(
/* If the requested key is one more than the previous key, then
** try to get there using sqlite3BtreeNext() rather than a full
** binary search. This is an optimization only. The correct answer
- ** is still obtained without this case, only a little more slowely */
+ ** is still obtained without this case, only a little more slowly. */
if( pCur->info.nKey+1==intKey ){
*pRes = 0;
rc = sqlite3BtreeNext(pCur, 0);
@@ -74773,10 +76200,36 @@ bypass_moveto_root:
}else{
chldPg = get4byte(findCell(pPage, lwr));
}
- pCur->ix = (u16)lwr;
- rc = moveToChild(pCur, chldPg);
- if( rc ) break;
- }
+
+ /* This block is similar to an in-lined version of:
+ **
+ ** pCur->ix = (u16)lwr;
+ ** rc = moveToChild(pCur, chldPg);
+ ** if( rc ) break;
+ */
+ pCur->info.nSize = 0;
+ pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ pCur->aiIdx[pCur->iPage] = (u16)lwr;
+ pCur->apPage[pCur->iPage] = pCur->pPage;
+ pCur->ix = 0;
+ pCur->iPage++;
+ rc = getAndInitPage(pCur->pBt, chldPg, &pCur->pPage, pCur->curPagerFlags);
+ if( rc==SQLITE_OK
+ && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey)
+ ){
+ releasePage(pCur->pPage);
+ rc = SQLITE_CORRUPT_PGNO(chldPg);
+ }
+ if( rc ){
+ pCur->pPage = pCur->apPage[--pCur->iPage];
+ break;
+ }
+ /*
+ ***** End of in-lined moveToChild() call */
+ }
moveto_index_finish:
pCur->info.nSize = 0;
assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
@@ -74960,7 +76413,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
}
pPage = pCur->pPage;
- assert( pPage->isInit );
+ if( sqlite3FaultSim(412) ) pPage->isInit = 0;
+ if( !pPage->isInit ){
+ return SQLITE_CORRUPT_BKPT;
+ }
if( !pPage->leaf ){
int idx = pCur->ix;
rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
@@ -75560,7 +77016,7 @@ static SQLITE_NOINLINE int clearCellOverflow(
/* Call xParseCell to compute the size of a cell. If the cell contains
** overflow, then invoke cellClearOverflow to clear out that overflow.
-** STore the result code (SQLITE_OK or some error code) in rc.
+** Store the result code (SQLITE_OK or some error code) in rc.
**
** Implemented as macro to force inlining for performance.
*/
@@ -76171,12 +77627,13 @@ static int rebuildPage(
int k; /* Current slot in pCArray->apEnd[] */
u8 *pSrcEnd; /* Current pCArray->apEnd[k] value */
+ assert( nCell>0 );
assert( i<iEnd );
j = get2byte(&aData[hdr+5]);
- if( NEVER(j>(u32)usableSize) ){ j = 0; }
+ if( j>(u32)usableSize ){ j = 0; }
memcpy(&pTmp[j], &aData[j], usableSize - j);
- for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
+ for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i; k++){}
pSrcEnd = pCArray->apEnd[k];
pData = pEnd;
@@ -76239,7 +77696,7 @@ static int rebuildPage(
** Finally, argument pBegin points to the byte immediately following the
** end of the space required by this page for the cell-pointer area (for
** all cells - not just those inserted by the current call). If the content
-** area must be extended to before this point in order to accomodate all
+** area must be extended to before this point in order to accommodate all
** cells in apCell[], then the cells do not fit and non-zero is returned.
*/
static int pageInsertArray(
@@ -76259,7 +77716,7 @@ static int pageInsertArray(
u8 *pEnd; /* Maximum extent of cell data */
assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */
if( iEnd<=iFirst ) return 0;
- for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
+ for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i ; k++){}
pEnd = pCArray->apEnd[k];
while( 1 /*Exit by break*/ ){
int sz, rc;
@@ -76477,6 +77934,7 @@ static int editPage(
return SQLITE_OK;
editpage_fail:
/* Unable to edit this page. Rebuild it from scratch instead. */
+ if( nNew<1 ) return SQLITE_CORRUPT_BKPT;
populateCellCache(pCArray, iNew, nNew);
return rebuildPage(pCArray, iNew, nNew, pPg);
}
@@ -76554,7 +78012,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
** with entries for the new page, and any pointer from the
** cell on the page to an overflow page. If either of these
** operations fails, the return code is set, but the contents
- ** of the parent page are still manipulated by thh code below.
+ ** of the parent page are still manipulated by the code below.
** That is Ok, at this point the parent page is guaranteed to
** be marked as dirty. Returning an error code will cause a
** rollback, undoing any changes made to the parent page.
@@ -76830,7 +78288,7 @@ static int balance_nonroot(
pgno = get4byte(pRight);
while( 1 ){
if( rc==SQLITE_OK ){
- rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0);
+ rc = getAndInitPage(pBt, pgno, &apOld[i], 0);
}
if( rc ){
memset(apOld, 0, (i+1)*sizeof(MemPage*));
@@ -77144,7 +78602,7 @@ static int balance_nonroot(
}
}
- /* Sanity check: For a non-corrupt database file one of the follwing
+ /* Sanity check: For a non-corrupt database file one of the following
** must be true:
** (1) We found one or more cells (cntNew[0])>0), or
** (2) pPage is a virtual root page. A virtual root page is when
@@ -77369,9 +78827,9 @@ static int balance_nonroot(
iOvflSpace += sz;
assert( sz<=pBt->maxLocal+23 );
assert( iOvflSpace <= (int)pBt->pageSize );
- for(k=0; b.ixNx[k]<=j && ALWAYS(k<NB*2); k++){}
+ for(k=0; ALWAYS(k<NB*2) && b.ixNx[k]<=j; k++){}
pSrcEnd = b.apEnd[k];
- if( SQLITE_WITHIN(pSrcEnd, pCell, pCell+sz) ){
+ if( SQLITE_OVERFLOW(pSrcEnd, pCell, pCell+sz) ){
rc = SQLITE_CORRUPT_BKPT;
goto balance_cleanup;
}
@@ -77405,6 +78863,8 @@ static int balance_nonroot(
for(i=1-nNew; i<nNew; i++){
int iPg = i<0 ? -i : i;
assert( iPg>=0 && iPg<nNew );
+ assert( iPg>=1 || i>=0 );
+ assert( iPg<ArraySize(cntOld) );
if( abDone[iPg] ) continue; /* Skip pages already processed */
if( i>=0 /* On the upwards pass, or... */
|| cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */
@@ -77761,7 +79221,7 @@ static int btreeOverwriteContent(
){
int nData = pX->nData - iOffset;
if( nData<=0 ){
- /* Overwritting with zeros */
+ /* Overwriting with zeros */
int i;
for(i=0; i<iAmt && pDest[i]==0; i++){}
if( i<iAmt ){
@@ -77797,7 +79257,7 @@ static int btreeOverwriteContent(
** cell.
*/
static SQLITE_NOINLINE int btreeOverwriteOverflowCell(
- BtCursor *pCur, /* Cursor pointing to cell to ovewrite */
+ BtCursor *pCur, /* Cursor pointing to cell to overwrite */
const BtreePayload *pX /* Content to write into the cell */
){
int iOffset; /* Next byte of pX->pData to write */
@@ -78544,7 +80004,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
MemPage *pRoot;
Pgno pgnoRoot;
int rc;
- int ptfFlags; /* Page-type flage for the root page of new table */
+ int ptfFlags; /* Page-type flags for the root page of new table */
assert( sqlite3BtreeHoldsMutex(p) );
assert( pBt->inTransaction==TRANS_WRITE );
@@ -78713,7 +80173,7 @@ static int clearDatabasePage(
if( pgno>btreePagecount(pBt) ){
return SQLITE_CORRUPT_BKPT;
}
- rc = getAndInitPage(pBt, pgno, &pPage, 0, 0);
+ rc = getAndInitPage(pBt, pgno, &pPage, 0);
if( rc ) return rc;
if( (pBt->openFlags & BTREE_SINGLE)==0
&& sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1))
@@ -79134,7 +80594,8 @@ static void checkAppendMsg(
** corresponds to page iPg is already set.
*/
static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
- assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ assert( pCheck->aPgRef!=0 );
+ assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 );
return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07)));
}
@@ -79142,7 +80603,8 @@ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg.
*/
static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
- assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+ assert( pCheck->aPgRef!=0 );
+ assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 );
pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07));
}
@@ -79156,7 +80618,7 @@ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
** Also check that the page number is in bounds.
*/
static int checkRef(IntegrityCk *pCheck, Pgno iPage){
- if( iPage>pCheck->nPage || iPage==0 ){
+ if( iPage>pCheck->nCkPage || iPage==0 ){
checkAppendMsg(pCheck, "invalid page number %u", iPage);
return 1;
}
@@ -79379,10 +80841,11 @@ static int checkTreePage(
if( iPage==0 ) return 0;
if( checkRef(pCheck, iPage) ) return 0;
pCheck->zPfx = "Tree %u page %u: ";
- pCheck->v0 = pCheck->v1 = iPage;
+ pCheck->v1 = iPage;
if( (rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0 ){
checkAppendMsg(pCheck,
"unable to get the page. error code=%d", rc);
+ if( rc==SQLITE_IOERR_NOMEM ) pCheck->rc = SQLITE_NOMEM;
goto end_of_check;
}
@@ -79653,15 +81116,15 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
sCheck.db = db;
sCheck.pBt = pBt;
sCheck.pPager = pBt->pPager;
- sCheck.nPage = btreePagecount(sCheck.pBt);
+ sCheck.nCkPage = btreePagecount(sCheck.pBt);
sCheck.mxErr = mxErr;
sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
- if( sCheck.nPage==0 ){
+ if( sCheck.nCkPage==0 ){
goto integrity_ck_cleanup;
}
- sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
+ sCheck.aPgRef = sqlite3MallocZero((sCheck.nCkPage / 8)+ 1);
if( !sCheck.aPgRef ){
checkOom(&sCheck);
goto integrity_ck_cleanup;
@@ -79673,7 +81136,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
}
i = PENDING_BYTE_PAGE(pBt);
- if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);
+ if( i<=sCheck.nCkPage ) setPageReferenced(&sCheck, i);
/* Check the integrity of the freelist
*/
@@ -79716,6 +81179,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
}
#endif
+ sCheck.v0 = aRoot[i];
checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
}
pBt->db->flags = savedDbFlags;
@@ -79723,7 +81187,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
/* Make sure every page in the file is referenced
*/
if( !bPartial ){
- for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){
+ for(i=1; i<=sCheck.nCkPage && sCheck.mxErr; i++){
#ifdef SQLITE_OMIT_AUTOVACUUM
if( getPageReferenced(&sCheck, i)==0 ){
checkAppendMsg(&sCheck, "Page %u: never used", i);
@@ -81143,6 +82607,40 @@ SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
}
/*
+** If pMem is already a string, detect if it is a zero-terminated
+** string, or make it into one if possible, and mark it as such.
+**
+** This is an optimization. Correct operation continues even if
+** this routine is a no-op.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){
+ if( (pMem->flags & (MEM_Str|MEM_Term|MEM_Ephem|MEM_Static))!=MEM_Str ){
+ /* pMem must be a string, and it cannot be an ephemeral or static string */
+ return;
+ }
+ if( pMem->enc!=SQLITE_UTF8 ) return;
+ if( NEVER(pMem->z==0) ) return;
+ if( pMem->flags & MEM_Dyn ){
+ if( pMem->xDel==sqlite3_free
+ && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1)
+ ){
+ pMem->z[pMem->n] = 0;
+ pMem->flags |= MEM_Term;
+ return;
+ }
+ if( pMem->xDel==sqlite3RCStrUnref ){
+ /* Blindly assume that all RCStr objects are zero-terminated */
+ pMem->flags |= MEM_Term;
+ return;
+ }
+ }else if( pMem->szMalloc >= pMem->n+1 ){
+ pMem->z[pMem->n] = 0;
+ pMem->flags |= MEM_Term;
+ return;
+ }
+}
+
+/*
** It is already known that pMem contains an unterminated string.
** Add the zero terminator.
**
@@ -81404,36 +82902,6 @@ SQLITE_PRIVATE void sqlite3VdbeMemReleaseMalloc(Mem *p){
}
/*
-** Convert a 64-bit IEEE double into a 64-bit signed integer.
-** If the double is out of range of a 64-bit signed integer then
-** return the closest available 64-bit signed integer.
-*/
-static SQLITE_NOINLINE i64 doubleToInt64(double r){
-#ifdef SQLITE_OMIT_FLOATING_POINT
- /* When floating-point is omitted, double and int64 are the same thing */
- return r;
-#else
- /*
- ** Many compilers we encounter do not define constants for the
- ** minimum and maximum 64-bit integers, or they define them
- ** inconsistently. And many do not understand the "LL" notation.
- ** So we define our own static constants here using nothing
- ** larger than a 32-bit integer constant.
- */
- static const i64 maxInt = LARGEST_INT64;
- static const i64 minInt = SMALLEST_INT64;
-
- if( r<=(double)minInt ){
- return minInt;
- }else if( r>=(double)maxInt ){
- return maxInt;
- }else{
- return (i64)r;
- }
-#endif
-}
-
-/*
** Return some kind of integer value which is the best we can do
** at representing the value that *pMem describes as an integer.
** If pMem is an integer, then the value is exact. If pMem is
@@ -81459,7 +82927,7 @@ SQLITE_PRIVATE i64 sqlite3VdbeIntValue(const Mem *pMem){
testcase( flags & MEM_IntReal );
return pMem->u.i;
}else if( flags & MEM_Real ){
- return doubleToInt64(pMem->u.r);
+ return sqlite3RealToI64(pMem->u.r);
}else if( (flags & (MEM_Str|MEM_Blob))!=0 && pMem->z!=0 ){
return memIntValue(pMem);
}else{
@@ -81521,7 +82989,7 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
if( pMem->flags & MEM_IntReal ){
MemSetTypeFlag(pMem, MEM_Int);
}else{
- i64 ix = doubleToInt64(pMem->u.r);
+ i64 ix = sqlite3RealToI64(pMem->u.r);
/* Only mark the value as an integer if
**
@@ -81589,8 +83057,8 @@ SQLITE_PRIVATE int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){
** from UBSAN.
*/
SQLITE_PRIVATE i64 sqlite3RealToI64(double r){
- if( r<=(double)SMALLEST_INT64 ) return SMALLEST_INT64;
- if( r>=(double)LARGEST_INT64) return LARGEST_INT64;
+ if( r<-9223372036854774784.0 ) return SMALLEST_INT64;
+ if( r>+9223372036854774784.0 ) return LARGEST_INT64;
return (i64)r;
}
@@ -81661,6 +83129,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
break;
}
default: {
+ int rc;
assert( aff==SQLITE_AFF_TEXT );
assert( MEM_Str==(MEM_Blob>>3) );
pMem->flags |= (pMem->flags&MEM_Blob)>>3;
@@ -81668,7 +83137,9 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero);
if( encoding!=SQLITE_UTF8 ) pMem->n &= ~1;
- return sqlite3VdbeChangeEncoding(pMem, encoding);
+ rc = sqlite3VdbeChangeEncoding(pMem, encoding);
+ if( rc ) return rc;
+ sqlite3VdbeMemZeroTerminateIfAble(pMem);
}
}
return SQLITE_OK;
@@ -82192,6 +83663,24 @@ SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
return valueToText(pVal, enc);
}
+/* Return true if sqlit3_value object pVal is a string or blob value
+** that uses the destructor specified in the second argument.
+**
+** TODO: Maybe someday promote this interface into a published API so
+** that third-party extensions can get access to it?
+*/
+SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value *pVal, void(*xFree)(void*)){
+ if( ALWAYS(pVal!=0)
+ && ALWAYS((pVal->flags & (MEM_Str|MEM_Blob))!=0)
+ && (pVal->flags & MEM_Dyn)!=0
+ && pVal->xDel==xFree
+ ){
+ return 1;
+ }else{
+ return 0;
+ }
+}
+
/*
** Create a new sqlite3_value object.
*/
@@ -82259,6 +83748,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
}
pRec->nField = p->iVal+1;
+ sqlite3VdbeMemSetNull(&pRec->aMem[p->iVal]);
return &pRec->aMem[p->iVal];
}
#else
@@ -82317,7 +83807,7 @@ static int valueFromFunction(
#endif
assert( pFunc );
if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
- || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
+ || (pFunc->funcFlags & (SQLITE_FUNC_NEEDCOLL|SQLITE_FUNC_RUNONLY))!=0
){
return SQLITE_OK;
}
@@ -82518,6 +84008,7 @@ static int valueFromExpr(
if( pVal ){
pVal->flags = MEM_Int;
pVal->u.i = pExpr->u.zToken[4]==0;
+ sqlite3ValueApplyAffinity(pVal, affinity, enc);
}
}
@@ -83040,14 +84531,44 @@ static int growOpArray(Vdbe *v, int nOp){
** sqlite3CantopenError(lineno)
*/
static void test_addop_breakpoint(int pc, Op *pOp){
- static int n = 0;
+ static u64 n = 0;
(void)pc;
(void)pOp;
n++;
+ if( n==LARGEST_UINT64 ) abort(); /* so that n is used, preventing a warning */
}
#endif
/*
+** Slow paths for sqlite3VdbeAddOp3() and sqlite3VdbeAddOp4Int() for the
+** unusual case when we need to increase the size of the Vdbe.aOp[] array
+** before adding the new opcode.
+*/
+static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
+ assert( p->nOpAlloc<=p->nOp );
+ if( growOpArray(p, 1) ) return 1;
+ assert( p->nOpAlloc>p->nOp );
+ return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+}
+static SQLITE_NOINLINE int addOp4IntSlow(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ int p4 /* The P4 operand as an integer */
+){
+ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+ if( p->db->mallocFailed==0 ){
+ VdbeOp *pOp = &p->aOp[addr];
+ pOp->p4type = P4_INT32;
+ pOp->p4.i = p4;
+ }
+ return addr;
+}
+
+
+/*
** Add a new instruction to the list of instructions current in the
** VDBE. Return the address of the new instruction.
**
@@ -83057,17 +84578,16 @@ static void test_addop_breakpoint(int pc, Op *pOp){
**
** op The opcode for this instruction
**
-** p1, p2, p3 Operands
-**
-** Use the sqlite3VdbeResolveLabel() function to fix an address and
-** the sqlite3VdbeChangeP4() function to change the value of the P4
-** operand.
+** p1, p2, p3, p4 Operands
*/
-static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
- assert( p->nOpAlloc<=p->nOp );
- if( growOpArray(p, 1) ) return 1;
- assert( p->nOpAlloc>p->nOp );
- return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
+ return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
+ return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
+ return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
}
SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
int i;
@@ -83090,6 +84610,9 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
pOp->p3 = p3;
pOp->p4.p = 0;
pOp->p4type = P4_NOTUSED;
+
+ /* Replicate this logic in sqlite3VdbeAddOp4Int()
+ ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
pOp->zComment = 0;
#endif
@@ -83106,16 +84629,59 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
#ifdef SQLITE_VDBE_COVERAGE
pOp->iSrcLine = 0;
#endif
+ /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ** Replicate in sqlite3VdbeAddOp4Int() */
+
return i;
}
-SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
- return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
-}
-SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
- return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
-}
-SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
- return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
+ Vdbe *p, /* Add the opcode to this VM */
+ int op, /* The new opcode */
+ int p1, /* The P1 operand */
+ int p2, /* The P2 operand */
+ int p3, /* The P3 operand */
+ int p4 /* The P4 operand as an integer */
+){
+ int i;
+ VdbeOp *pOp;
+
+ i = p->nOp;
+ if( p->nOpAlloc<=i ){
+ return addOp4IntSlow(p, op, p1, p2, p3, p4);
+ }
+ p->nOp++;
+ pOp = &p->aOp[i];
+ assert( pOp!=0 );
+ pOp->opcode = (u8)op;
+ pOp->p5 = 0;
+ pOp->p1 = p1;
+ pOp->p2 = p2;
+ pOp->p3 = p3;
+ pOp->p4.i = p4;
+ pOp->p4type = P4_INT32;
+
+ /* Replicate this logic in sqlite3VdbeAddOp3()
+ ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ pOp->zComment = 0;
+#endif
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
+ pOp->nExec = 0;
+ pOp->nCycle = 0;
+#endif
+#ifdef SQLITE_DEBUG
+ if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ sqlite3VdbePrintOp(0, i, &p->aOp[i]);
+ test_addop_breakpoint(i, &p->aOp[i]);
+ }
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ pOp->iSrcLine = 0;
+#endif
+ /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ** Replicate in sqlite3VdbeAddOp3() */
+
+ return i;
}
/* Generate code for an unconditional jump to instruction iDest
@@ -83293,7 +84859,7 @@ SQLITE_PRIVATE int sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt,
if( bPush){
pParse->addrExplain = iThis;
}
- sqlite3VdbeScanStatus(v, iThis, 0, 0, 0, 0);
+ sqlite3VdbeScanStatus(v, iThis, -1, -1, 0, 0);
}
return addr;
}
@@ -83323,26 +84889,6 @@ SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere,
sqlite3MayAbort(p->pParse);
}
-/*
-** Add an opcode that includes the p4 value as an integer.
-*/
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
- Vdbe *p, /* Add the opcode to this VM */
- int op, /* The new opcode */
- int p1, /* The P1 operand */
- int p2, /* The P2 operand */
- int p3, /* The P3 operand */
- int p4 /* The P4 operand as an integer */
-){
- int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
- if( p->db->mallocFailed==0 ){
- VdbeOp *pOp = &p->aOp[addr];
- pOp->p4type = P4_INT32;
- pOp->p4.i = p4;
- }
- return addr;
-}
-
/* Insert the end of a co-routine
*/
SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){
@@ -83655,7 +85201,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
p->bIsReader = 0;
pOp = &p->aOp[p->nOp-1];
assert( p->aOp[0].opcode==OP_Init );
- while( 1 /* Loop termates when it reaches the OP_Init opcode */ ){
+ while( 1 /* Loop terminates when it reaches the OP_Init opcode */ ){
/* Only JUMP opcodes and the short list of special opcodes in the switch
** below need to be considered. The mkopcodeh.tcl generator script groups
** all these opcodes together near the front of the opcode list. Skip
@@ -83777,6 +85323,10 @@ SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn(
int iDest = pOp->p2; /* Jump destination */
if( iDest==0 ) continue;
if( pOp->opcode==OP_Gosub ) continue;
+ if( pOp->p3==20230325 && pOp->opcode==OP_NotNull ){
+ /* This is a deliberately taken illegal branch. tag-20230325-2 */
+ continue;
+ }
if( iDest<0 ){
int j = ADDR(iDest);
assert( j>=0 );
@@ -84025,8 +85575,8 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(
pScan = 0;
}
if( pScan ){
- pScan->addrLoop = addrLoop;
- pScan->addrVisit = addrVisit;
+ if( addrLoop>0 ) pScan->addrLoop = addrLoop;
+ if( addrVisit>0 ) pScan->addrVisit = addrVisit;
}
}
}
@@ -84109,7 +85659,7 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){
/*
** If the input FuncDef structure is ephemeral, then free it. If
-** the FuncDef is not ephermal, then do nothing.
+** the FuncDef is not ephemeral, then do nothing.
*/
static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){
assert( db!=0 );
@@ -84170,6 +85720,10 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4);
break;
}
+ case P4_TABLEREF: {
+ if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4);
+ break;
+ }
}
}
@@ -84273,7 +85827,6 @@ SQLITE_PRIVATE void sqlite3VdbeReleaseRegisters(
}
#endif /* SQLITE_DEBUG */
-
/*
** Change the value of the P4 operand for a specific instruction.
** This routine is useful when a large program is loaded from a
@@ -84298,7 +85851,7 @@ static void SQLITE_NOINLINE vdbeChangeP4Full(
int n
){
if( pOp->p4type ){
- freeP4(p->db, pOp->p4type, pOp->p4.p);
+ assert( pOp->p4type > P4_FREE_IF_LE );
pOp->p4type = 0;
pOp->p4.p = 0;
}
@@ -85194,7 +86747,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
sqlite3VdbeMemSetInt64(pMem+1, pOp->p2);
sqlite3VdbeMemSetInt64(pMem+2, pOp->p3);
sqlite3VdbeMemSetStr(pMem+3, zP4, -1, SQLITE_UTF8, sqlite3_free);
- p->nResColumn = 4;
+ assert( p->nResColumn==4 );
}else{
sqlite3VdbeMemSetInt64(pMem+0, i);
sqlite3VdbeMemSetStr(pMem+1, (char*)sqlite3OpcodeName(pOp->opcode),
@@ -85213,7 +86766,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
sqlite3VdbeMemSetNull(pMem+7);
#endif
sqlite3VdbeMemSetStr(pMem+5, zP4, -1, SQLITE_UTF8, sqlite3_free);
- p->nResColumn = 8;
+ assert( p->nResColumn==8 );
}
p->pResultRow = pMem;
if( db->mallocFailed ){
@@ -85427,26 +86980,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
resolveP2Values(p, &nArg);
p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort);
if( pParse->explain ){
- static const char * const azColName[] = {
- "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment",
- "id", "parent", "notused", "detail"
- };
- int iFirst, mx, i;
if( nMem<10 ) nMem = 10;
p->explain = pParse->explain;
- if( pParse->explain==2 ){
- sqlite3VdbeSetNumCols(p, 4);
- iFirst = 8;
- mx = 12;
- }else{
- sqlite3VdbeSetNumCols(p, 8);
- iFirst = 0;
- mx = 8;
- }
- for(i=iFirst; i<mx; i++){
- sqlite3VdbeSetColName(p, i-iFirst, COLNAME_NAME,
- azColName[i], SQLITE_STATIC);
- }
+ p->nResColumn = 12 - 4*p->explain;
}
p->expired = 0;
@@ -85498,7 +87034,23 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
if( pCx ) sqlite3VdbeFreeCursorNN(p,pCx);
}
+static SQLITE_NOINLINE void freeCursorWithCache(Vdbe *p, VdbeCursor *pCx){
+ VdbeTxtBlbCache *pCache = pCx->pCache;
+ assert( pCx->colCache );
+ pCx->colCache = 0;
+ pCx->pCache = 0;
+ if( pCache->pCValue ){
+ sqlite3RCStrUnref(pCache->pCValue);
+ pCache->pCValue = 0;
+ }
+ sqlite3DbFree(p->db, pCache);
+ sqlite3VdbeFreeCursorNN(p, pCx);
+}
SQLITE_PRIVATE void sqlite3VdbeFreeCursorNN(Vdbe *p, VdbeCursor *pCx){
+ if( pCx->colCache ){
+ freeCursorWithCache(p, pCx);
+ return;
+ }
switch( pCx->eCurType ){
case CURTYPE_SORTER: {
sqlite3VdbeSorterClose(p->db, pCx);
@@ -85599,12 +87151,12 @@ SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){
int n;
sqlite3 *db = p->db;
- if( p->nResColumn ){
- releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ if( p->nResAlloc ){
+ releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N);
sqlite3DbFree(db, p->aColName);
}
n = nResColumn*COLNAME_N;
- p->nResColumn = (u16)nResColumn;
+ p->nResColumn = p->nResAlloc = (u16)nResColumn;
p->aColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n );
if( p->aColName==0 ) return;
initMemArray(p->aColName, n, db, MEM_Null);
@@ -85629,14 +87181,14 @@ SQLITE_PRIVATE int sqlite3VdbeSetColName(
){
int rc;
Mem *pColName;
- assert( idx<p->nResColumn );
+ assert( idx<p->nResAlloc );
assert( var<COLNAME_N );
if( p->db->mallocFailed ){
assert( !zName || xDel!=SQLITE_DYNAMIC );
return SQLITE_NOMEM_BKPT;
}
assert( p->aColName!=0 );
- pColName = &(p->aColName[idx+var*p->nResColumn]);
+ pColName = &(p->aColName[idx+var*p->nResAlloc]);
rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel);
assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 );
return rc;
@@ -86149,6 +87701,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
sqlite3VdbeLeave(p);
return SQLITE_BUSY;
}else if( rc!=SQLITE_OK ){
+ sqlite3SystemError(db, rc);
p->rc = rc;
sqlite3RollbackAll(db, SQLITE_OK);
p->nChange = 0;
@@ -86460,7 +88013,7 @@ static void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
assert( db!=0 );
assert( p->db==0 || p->db==db );
if( p->aColName ){
- releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N);
sqlite3DbNNFreeNN(db, p->aColName);
}
for(pSub=p->pProgram; pSub; pSub=pNext){
@@ -87060,6 +88613,15 @@ static int vdbeRecordCompareDebug(
if( d1+(u64)serial_type1+2>(u64)nKey1
&& d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)>(u64)nKey1
){
+ if( serial_type1>=1
+ && serial_type1<=7
+ && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)<=(u64)nKey1+8
+ && CORRUPT_DB
+ ){
+ return 1; /* corrupt record not detected by
+ ** sqlite3VdbeRecordCompareWithSkip(). Return true
+ ** to avoid firing the assert() */
+ }
break;
}
@@ -87228,20 +88790,33 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem
return n1 - n2;
}
+/* The following two functions are used only within testcase() to prove
+** test coverage. These functions do no exist for production builds.
+** We must use separate SQLITE_NOINLINE functions here, since otherwise
+** optimizer code movement causes gcov to become very confused.
+*/
+#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG)
+static int SQLITE_NOINLINE doubleLt(double a, double b){ return a<b; }
+static int SQLITE_NOINLINE doubleEq(double a, double b){ return a==b; }
+#endif
+
/*
** Do a comparison between a 64-bit signed integer and a 64-bit floating-point
** number. Return negative, zero, or positive if the first (i64) is less than,
** equal to, or greater than the second (double).
*/
SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){
- if( sizeof(LONGDOUBLE_TYPE)>8 ){
+ if( sqlite3IsNaN(r) ){
+ /* SQLite considers NaN to be a NULL. And all integer values are greater
+ ** than NULL */
+ return 1;
+ }
+ if( sqlite3Config.bUseLongDouble ){
LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i;
testcase( x<r );
testcase( x>r );
testcase( x==r );
- if( x<r ) return -1;
- if( x>r ) return +1; /*NO_TEST*/ /* work around bugs in gcov */
- return 0; /*NO_TEST*/ /* work around bugs in gcov */
+ return (x<r) ? -1 : (x>r);
}else{
i64 y;
double s;
@@ -87251,9 +88826,10 @@ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){
if( i<y ) return -1;
if( i>y ) return +1;
s = (double)i;
- if( s<r ) return -1;
- if( s>r ) return +1;
- return 0;
+ testcase( doubleLt(s,r) );
+ testcase( doubleLt(r,s) );
+ testcase( doubleEq(r,s) );
+ return (s<r) ? -1 : (s>r);
}
}
@@ -87503,7 +89079,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
/* Serial types 12 or greater are strings and blobs (greater than
** numbers). Types 10 and 11 are currently "reserved for future
** use", so it doesn't really matter what the results of comparing
- ** them to numberic values are. */
+ ** them to numeric values are. */
rc = serial_type==10 ? -1 : +1;
}else if( serial_type==0 ){
rc = -1;
@@ -88398,7 +89974,15 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
int rc = SQLITE_OK;
Vdbe *p = (Vdbe*)pStmt;
#if SQLITE_THREADSAFE
- sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+ sqlite3_mutex *mutex;
+#endif
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+#if SQLITE_THREADSAFE
+ mutex = p->db->mutex;
#endif
sqlite3_mutex_enter(mutex);
for(i=0; i<p->nVar; i++){
@@ -88621,7 +90205,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){
** is too big or if an OOM occurs.
**
** The invokeValueDestructor(P,X) routine invokes destructor function X()
-** on value P is not going to be used and need to be destroyed.
+** on value P if P is not going to be used and need to be destroyed.
*/
static void setResultStrOrError(
sqlite3_context *pCtx, /* Function context */
@@ -88651,7 +90235,7 @@ static void setResultStrOrError(
static int invokeValueDestructor(
const void *p, /* Value to destroy */
void (*xDel)(void*), /* The destructor */
- sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if no NULL */
+ sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if not NULL */
){
assert( xDel!=SQLITE_DYNAMIC );
if( xDel==0 ){
@@ -88661,7 +90245,14 @@ static int invokeValueDestructor(
}else{
xDel((void*)p);
}
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx!=0 ){
+ sqlite3_result_error_toobig(pCtx);
+ }
+#else
+ assert( pCtx!=0 );
sqlite3_result_error_toobig(pCtx);
+#endif
return SQLITE_TOOBIG;
}
SQLITE_API void sqlite3_result_blob(
@@ -88670,6 +90261,12 @@ SQLITE_API void sqlite3_result_blob(
int n,
void (*xDel)(void *)
){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 || n<0 ){
+ invokeValueDestructor(z, xDel, pCtx);
+ return;
+ }
+#endif
assert( n>=0 );
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, 0, xDel);
@@ -88680,8 +90277,14 @@ SQLITE_API void sqlite3_result_blob64(
sqlite3_uint64 n,
void (*xDel)(void *)
){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
assert( xDel!=SQLITE_DYNAMIC );
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(z, xDel, 0);
+ return;
+ }
+#endif
+ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
if( n>0x7fffffff ){
(void)invokeValueDestructor(z, xDel, pCtx);
}else{
@@ -88689,30 +90292,48 @@ SQLITE_API void sqlite3_result_blob64(
}
}
SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
}
SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_ERROR;
sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
}
#ifndef SQLITE_OMIT_UTF16
SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_ERROR;
sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
}
#endif
SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
}
SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetInt64(pCtx->pOut, iVal);
}
SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetNull(pCtx->pOut);
}
@@ -88722,14 +90343,37 @@ SQLITE_API void sqlite3_result_pointer(
const char *zPType,
void (*xDestructor)(void*)
){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(pPtr, xDestructor, 0);
+ return;
+ }
+#endif
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
sqlite3VdbeMemRelease(pOut);
pOut->flags = MEM_Null;
sqlite3VdbeMemSetPointer(pOut, pPtr, zPType, xDestructor);
}
SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
+#if defined(SQLITE_STRICT_SUBTYPE) && SQLITE_STRICT_SUBTYPE+0!=0
+ if( pCtx->pFunc!=0
+ && (pCtx->pFunc->funcFlags & SQLITE_RESULT_SUBTYPE)==0
+ ){
+ char zErr[200];
+ sqlite3_snprintf(sizeof(zErr), zErr,
+ "misuse of sqlite3_result_subtype() by %s()",
+ pCtx->pFunc->zName);
+ sqlite3_result_error(pCtx, zErr, -1);
+ return;
+ }
+#endif /* SQLITE_STRICT_SUBTYPE */
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
pOut->eSubtype = eSubtype & 0xff;
pOut->flags |= MEM_Subtype;
@@ -88740,6 +90384,12 @@ SQLITE_API void sqlite3_result_text(
int n,
void (*xDel)(void *)
){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(z, xDel, 0);
+ return;
+ }
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel);
}
@@ -88750,6 +90400,12 @@ SQLITE_API void sqlite3_result_text64(
void (*xDel)(void *),
unsigned char enc
){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ){
+ invokeValueDestructor(z, xDel, 0);
+ return;
+ }
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
assert( xDel!=SQLITE_DYNAMIC );
if( enc!=SQLITE_UTF8 ){
@@ -88760,6 +90416,7 @@ SQLITE_API void sqlite3_result_text64(
(void)invokeValueDestructor(z, xDel, pCtx);
}else{
setResultStrOrError(pCtx, z, (int)n, enc, xDel);
+ sqlite3VdbeMemZeroTerminateIfAble(pCtx->pOut);
}
}
#ifndef SQLITE_OMIT_UTF16
@@ -88792,7 +90449,16 @@ SQLITE_API void sqlite3_result_text16le(
}
#endif /* SQLITE_OMIT_UTF16 */
SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+ if( pValue==0 ){
+ sqlite3_result_null(pCtx);
+ return;
+ }
+#endif
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemCopy(pOut, pValue);
sqlite3VdbeChangeEncoding(pOut, pCtx->enc);
@@ -88804,7 +90470,12 @@ SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
sqlite3_result_zeroblob64(pCtx, n>0 ? n : 0);
}
SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
- Mem *pOut = pCtx->pOut;
+ Mem *pOut;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
sqlite3_result_error_toobig(pCtx);
@@ -88818,6 +90489,9 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
#endif
}
SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
pCtx->isError = errCode ? errCode : -1;
#ifdef SQLITE_DEBUG
if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;
@@ -88830,6 +90504,9 @@ SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
/* Force an SQLITE_TOOBIG error. */
SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_TOOBIG;
sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1,
@@ -88838,6 +90515,9 @@ SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
/* An SQLITE_NOMEM error. */
SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetNull(pCtx->pOut);
pCtx->isError = SQLITE_NOMEM_BKPT;
@@ -89090,6 +90770,9 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
** pointer to it.
*/
SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return 0;
+#endif
assert( p && p->pFunc );
return p->pFunc->pUserData;
}
@@ -89105,7 +90788,11 @@ SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
** application defined function.
*/
SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return 0;
+#else
assert( p && p->pOut );
+#endif
return p->pOut->db;
}
@@ -89124,7 +90811,11 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
** value, as a signal to the xUpdate routine that the column is unchanged.
*/
SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return 0;
+#else
assert( p );
+#endif
return sqlite3_value_nochange(p->pOut);
}
@@ -89132,7 +90823,7 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){
** The destructor function for a ValueList object. This needs to be
** a separate function, unknowable to the application, to ensure that
** calls to sqlite3_vtab_in_first()/sqlite3_vtab_in_next() that are not
-** preceeded by activation of IN processing via sqlite3_vtab_int() do not
+** preceded by activation of IN processing via sqlite3_vtab_int() do not
** try to access a fake ValueList object inserted by a hostile extension.
*/
SQLITE_PRIVATE void sqlite3VdbeValueListFree(void *pToDelete){
@@ -89152,7 +90843,7 @@ static int valueFromValueList(
ValueList *pRhs;
*ppOut = 0;
- if( pVal==0 ) return SQLITE_MISUSE;
+ if( pVal==0 ) return SQLITE_MISUSE_BKPT;
if( (pVal->flags & MEM_Dyn)==0 || pVal->xDel!=sqlite3VdbeValueListFree ){
return SQLITE_ERROR;
}else{
@@ -89283,6 +90974,9 @@ SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
AuxData *pAuxData;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return 0;
+#endif
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
#if SQLITE_ENABLE_STAT4
if( pCtx->pVdbe==0 ) return 0;
@@ -89315,8 +91009,12 @@ SQLITE_API void sqlite3_set_auxdata(
void (*xDelete)(void*)
){
AuxData *pAuxData;
- Vdbe *pVdbe = pCtx->pVdbe;
+ Vdbe *pVdbe;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pCtx==0 ) return;
+#endif
+ pVdbe= pCtx->pVdbe;
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
#ifdef SQLITE_ENABLE_STAT4
if( pVdbe==0 ) goto failed;
@@ -89372,7 +91070,8 @@ SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){
*/
SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
- return pVm ? pVm->nResColumn : 0;
+ if( pVm==0 ) return 0;
+ return pVm->nResColumn;
}
/*
@@ -89461,7 +91160,7 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){
** sqlite3_column_real()
** sqlite3_column_bytes()
** sqlite3_column_bytes16()
-** sqiite3_column_blob()
+** sqlite3_column_blob()
*/
static void columnMallocFailure(sqlite3_stmt *pStmt)
{
@@ -89546,6 +91245,32 @@ SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
}
/*
+** Column names appropriate for EXPLAIN or EXPLAIN QUERY PLAN.
+*/
+static const char * const azExplainColNames8[] = {
+ "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", /* EXPLAIN */
+ "id", "parent", "notused", "detail" /* EQP */
+};
+static const u16 azExplainColNames16data[] = {
+ /* 0 */ 'a', 'd', 'd', 'r', 0,
+ /* 5 */ 'o', 'p', 'c', 'o', 'd', 'e', 0,
+ /* 12 */ 'p', '1', 0,
+ /* 15 */ 'p', '2', 0,
+ /* 18 */ 'p', '3', 0,
+ /* 21 */ 'p', '4', 0,
+ /* 24 */ 'p', '5', 0,
+ /* 27 */ 'c', 'o', 'm', 'm', 'e', 'n', 't', 0,
+ /* 35 */ 'i', 'd', 0,
+ /* 38 */ 'p', 'a', 'r', 'e', 'n', 't', 0,
+ /* 45 */ 'n', 'o', 't', 'u', 's', 'e', 'd', 0,
+ /* 53 */ 'd', 'e', 't', 'a', 'i', 'l', 0
+};
+static const u8 iExplainColNames16[] = {
+ 0, 5, 12, 15, 18, 21, 24, 27,
+ 35, 38, 45, 53
+};
+
+/*
** Convert the N-th element of pStmt->pColName[] into a string using
** xFunc() then return that string. If N is out of range, return 0.
**
@@ -89577,15 +91302,29 @@ static const void *columnName(
return 0;
}
#endif
+ if( N<0 ) return 0;
ret = 0;
p = (Vdbe *)pStmt;
db = p->db;
assert( db!=0 );
- n = sqlite3_column_count(pStmt);
- if( N<n && N>=0 ){
+ sqlite3_mutex_enter(db->mutex);
+
+ if( p->explain ){
+ if( useType>0 ) goto columnName_end;
+ n = p->explain==1 ? 8 : 4;
+ if( N>=n ) goto columnName_end;
+ if( useUtf16 ){
+ int i = iExplainColNames16[N + 8*p->explain - 8];
+ ret = (void*)&azExplainColNames16data[i];
+ }else{
+ ret = (void*)azExplainColNames8[N + 8*p->explain - 8];
+ }
+ goto columnName_end;
+ }
+ n = p->nResColumn;
+ if( N<n ){
u8 prior_mallocFailed = db->mallocFailed;
N += useType*n;
- sqlite3_mutex_enter(db->mutex);
#ifndef SQLITE_OMIT_UTF16
if( useUtf16 ){
ret = sqlite3_value_text16((sqlite3_value*)&p->aColName[N]);
@@ -89602,8 +91341,9 @@ static const void *columnName(
sqlite3OomClear(db);
ret = 0;
}
- sqlite3_mutex_leave(db->mutex);
}
+columnName_end:
+ sqlite3_mutex_leave(db->mutex);
return ret;
}
@@ -89696,7 +91436,7 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
/*
** Unbind the value bound to variable i in virtual machine p. This is the
** the same as binding a NULL value to the column. If the "i" parameter is
-** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK.
+** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK.
**
** A successful evaluation of this routine acquires the mutex on p.
** the mutex is released if any kind of error occurs.
@@ -89711,7 +91451,7 @@ static int vdbeUnbind(Vdbe *p, unsigned int i){
}
sqlite3_mutex_enter(p->db->mutex);
if( p->eVdbeState!=VDBE_READY_STATE ){
- sqlite3Error(p->db, SQLITE_MISUSE);
+ sqlite3Error(p->db, SQLITE_MISUSE_BKPT);
sqlite3_mutex_leave(p->db->mutex);
sqlite3_log(SQLITE_MISUSE,
"bind on a busy prepared statement: [%s]", p->zSql);
@@ -89940,6 +91680,9 @@ SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
int rc;
Vdbe *p = (Vdbe *)pStmt;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(p->db->mutex);
if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){
rc = SQLITE_TOOBIG;
@@ -90061,6 +91804,42 @@ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){
}
/*
+** Set the explain mode for a statement.
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode){
+ Vdbe *v = (Vdbe*)pStmt;
+ int rc;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( pStmt==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+ sqlite3_mutex_enter(v->db->mutex);
+ if( ((int)v->explain)==eMode ){
+ rc = SQLITE_OK;
+ }else if( eMode<0 || eMode>2 ){
+ rc = SQLITE_ERROR;
+ }else if( (v->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ){
+ rc = SQLITE_ERROR;
+ }else if( v->eVdbeState!=VDBE_READY_STATE ){
+ rc = SQLITE_BUSY;
+ }else if( v->nMem>=10 && (eMode!=2 || v->haveEqpOps) ){
+ /* No reprepare necessary */
+ v->explain = eMode;
+ rc = SQLITE_OK;
+ }else{
+ v->explain = eMode;
+ rc = sqlite3Reprepare(v);
+ v->haveEqpOps = eMode==2;
+ }
+ if( v->explain ){
+ v->nResColumn = 12 - 4*v->explain;
+ }else{
+ v->nResColumn = v->nResAlloc;
+ }
+ sqlite3_mutex_leave(v->db->mutex);
+ return rc;
+}
+
+/*
** Return true if the prepared statement is in need of being reset.
*/
SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
@@ -90199,10 +91978,16 @@ static UnpackedRecord *vdbeUnpackRecord(
** a field of the row currently being updated or deleted.
*/
SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
Mem *pMem;
int rc = SQLITE_OK;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( db==0 || ppValue==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ p = db->pPreUpdate;
/* Test that this call is being made from within an SQLITE_DELETE or
** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */
if( !p || p->op==SQLITE_INSERT ){
@@ -90263,7 +92048,12 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa
** the number of columns in the row being updated, deleted or inserted.
*/
SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ p = db!=0 ? db->pPreUpdate : 0;
+#else
+ p = db->pPreUpdate;
+#endif
return (p ? p->keyinfo.nKeyField : 0);
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -90281,7 +92071,12 @@ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
** or SET DEFAULT action is considered a trigger.
*/
SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ p = db!=0 ? db->pPreUpdate : 0;
+#else
+ p = db->pPreUpdate;
+#endif
return (p ? p->v->nFrame : 0);
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -90292,7 +92087,12 @@ SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
** only.
*/
SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ p = db!=0 ? db->pPreUpdate : 0;
+#else
+ p = db->pPreUpdate;
+#endif
return (p ? p->iBlobWrite : -1);
}
#endif
@@ -90303,10 +92103,16 @@ SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){
** a field of the row currently being updated or inserted.
*/
SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
- PreUpdate *p = db->pPreUpdate;
+ PreUpdate *p;
int rc = SQLITE_OK;
Mem *pMem;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( db==0 || ppValue==0 ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+ p = db->pPreUpdate;
if( !p || p->op==SQLITE_DELETE ){
rc = SQLITE_MISUSE_BKPT;
goto preupdate_new_out;
@@ -90385,11 +92191,20 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2(
void *pOut /* OUT: Write the answer here */
){
Vdbe *p = (Vdbe*)pStmt;
- VdbeOp *aOp = p->aOp;
- int nOp = p->nOp;
+ VdbeOp *aOp;
+ int nOp;
ScanStatus *pScan = 0;
int idx;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( p==0 || pOut==0
+ || iScanStatusOp<SQLITE_SCANSTAT_NLOOP
+ || iScanStatusOp>SQLITE_SCANSTAT_NCYCLE ){
+ return 1;
+ }
+#endif
+ aOp = p->aOp;
+ nOp = p->nOp;
if( p->pFrame ){
VdbeFrame *pFrame;
for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
@@ -90536,7 +92351,7 @@ SQLITE_API int sqlite3_stmt_scanstatus(
SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
int ii;
- for(ii=0; ii<p->nOp; ii++){
+ for(ii=0; p!=0 && ii<p->nOp; ii++){
Op *pOp = &p->aOp[ii];
pOp->nExec = 0;
pOp->nCycle = 0;
@@ -90875,11 +92690,12 @@ SQLITE_API int sqlite3_found_count = 0;
** sqlite3CantopenError(lineno)
*/
static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){
- static int n = 0;
+ static u64 n = 0;
(void)pc;
(void)pOp;
(void)v;
n++;
+ if( n==LARGEST_UINT64 ) abort(); /* So that n is used, preventing a warning */
}
#endif
@@ -91299,6 +93115,9 @@ SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, StrAccum *pStr){
sqlite3_str_appendchar(pStr, 1, (c>=0x20&&c<=0x7f) ? c : '.');
}
sqlite3_str_appendf(pStr, "]%s", encnames[pMem->enc]);
+ if( f & MEM_Term ){
+ sqlite3_str_appendf(pStr, "(0-term)");
+ }
}
}
#endif
@@ -91435,6 +93254,93 @@ static u64 filterHash(const Mem *aMem, const Op *pOp){
return h;
}
+
+/*
+** For OP_Column, factor out the case where content is loaded from
+** overflow pages, so that the code to implement this case is separate
+** the common case where all content fits on the page. Factoring out
+** the code reduces register pressure and helps the common case
+** to run faster.
+*/
+static SQLITE_NOINLINE int vdbeColumnFromOverflow(
+ VdbeCursor *pC, /* The BTree cursor from which we are reading */
+ int iCol, /* The column to read */
+ int t, /* The serial-type code for the column value */
+ i64 iOffset, /* Offset to the start of the content value */
+ u32 cacheStatus, /* Current Vdbe.cacheCtr value */
+ u32 colCacheCtr, /* Current value of the column cache counter */
+ Mem *pDest /* Store the value into this register. */
+){
+ int rc;
+ sqlite3 *db = pDest->db;
+ int encoding = pDest->enc;
+ int len = sqlite3VdbeSerialTypeLen(t);
+ assert( pC->eCurType==CURTYPE_BTREE );
+ if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) return SQLITE_TOOBIG;
+ if( len > 4000 && pC->pKeyInfo==0 ){
+ /* Cache large column values that are on overflow pages using
+ ** an RCStr (reference counted string) so that if they are reloaded,
+ ** that do not have to be copied a second time. The overhead of
+ ** creating and managing the cache is such that this is only
+ ** profitable for larger TEXT and BLOB values.
+ **
+ ** Only do this on table-btrees so that writes to index-btrees do not
+ ** need to clear the cache. This buys performance in the common case
+ ** in exchange for generality.
+ */
+ VdbeTxtBlbCache *pCache;
+ char *pBuf;
+ if( pC->colCache==0 ){
+ pC->pCache = sqlite3DbMallocZero(db, sizeof(VdbeTxtBlbCache) );
+ if( pC->pCache==0 ) return SQLITE_NOMEM;
+ pC->colCache = 1;
+ }
+ pCache = pC->pCache;
+ if( pCache->pCValue==0
+ || pCache->iCol!=iCol
+ || pCache->cacheStatus!=cacheStatus
+ || pCache->colCacheCtr!=colCacheCtr
+ || pCache->iOffset!=sqlite3BtreeOffset(pC->uc.pCursor)
+ ){
+ if( pCache->pCValue ) sqlite3RCStrUnref(pCache->pCValue);
+ pBuf = pCache->pCValue = sqlite3RCStrNew( len+3 );
+ if( pBuf==0 ) return SQLITE_NOMEM;
+ rc = sqlite3BtreePayload(pC->uc.pCursor, iOffset, len, pBuf);
+ if( rc ) return rc;
+ pBuf[len] = 0;
+ pBuf[len+1] = 0;
+ pBuf[len+2] = 0;
+ pCache->iCol = iCol;
+ pCache->cacheStatus = cacheStatus;
+ pCache->colCacheCtr = colCacheCtr;
+ pCache->iOffset = sqlite3BtreeOffset(pC->uc.pCursor);
+ }else{
+ pBuf = pCache->pCValue;
+ }
+ assert( t>=12 );
+ sqlite3RCStrRef(pBuf);
+ if( t&1 ){
+ rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, encoding,
+ sqlite3RCStrUnref);
+ pDest->flags |= MEM_Term;
+ }else{
+ rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, 0,
+ sqlite3RCStrUnref);
+ }
+ }else{
+ rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, iOffset, len, pDest);
+ if( rc ) return rc;
+ sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
+ if( (t&1)!=0 && encoding==SQLITE_UTF8 ){
+ pDest->z[len] = 0;
+ pDest->flags |= MEM_Term;
+ }
+ }
+ pDest->flags &= ~MEM_Ephem;
+ return rc;
+}
+
+
/*
** Return the symbolic name for the data type of a pMem
*/
@@ -91477,6 +93383,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
Mem *pIn2 = 0; /* 2nd input operand */
Mem *pIn3 = 0; /* 3rd input operand */
Mem *pOut = 0; /* Output operand */
+ u32 colCacheCtr = 0; /* Column cache counter */
#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
u64 *pnCycle = 0;
int bStmtScanStatus = IS_STMT_SCANSTATUS(db)!=0;
@@ -91672,8 +93579,8 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
case OP_Goto: { /* jump */
#ifdef SQLITE_DEBUG
- /* In debuggging mode, when the p5 flags is set on an OP_Goto, that
- ** means we should really jump back to the preceeding OP_ReleaseReg
+ /* In debugging mode, when the p5 flags is set on an OP_Goto, that
+ ** means we should really jump back to the preceding OP_ReleaseReg
** instruction. */
if( pOp->p5 ){
assert( pOp->p2 < (int)(pOp - aOp) );
@@ -91881,7 +93788,7 @@ case OP_HaltIfNull: { /* in3 */
** P5 is a value between 0 and 4, inclusive, that modifies the P4 string.
**
** 0: (no change)
-** 1: NOT NULL contraint failed: P4
+** 1: NOT NULL constraint failed: P4
** 2: UNIQUE constraint failed: P4
** 3: CHECK constraint failed: P4
** 4: FOREIGN KEY constraint failed: P4
@@ -92685,7 +94592,7 @@ case OP_AddImm: { /* in1 */
pIn1 = &aMem[pOp->p1];
memAboutToChange(p, pIn1);
sqlite3VdbeMemIntegerify(pIn1);
- pIn1->u.i += pOp->p2;
+ *(u64*)&pIn1->u.i += (u64)pOp->p2;
break;
}
@@ -93012,10 +94919,10 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
** opcodes are allowed to occur between this instruction and the previous
** OP_Lt or OP_Gt.
**
-** If result of an OP_Eq comparison on the same two operands as the
-** prior OP_Lt or OP_Gt would have been true, then jump to P2.
-** If the result of an OP_Eq comparison on the two previous
-** operands would have been false or NULL, then fall through.
+** If the result of an OP_Eq comparison on the same two operands as
+** the prior OP_Lt or OP_Gt would have been true, then jump to P2. If
+** the result of an OP_Eq comparison on the two previous operands
+** would have been false or NULL, then fall through.
*/
case OP_ElseEq: { /* same as TK_ESCAPE, jump */
@@ -93445,7 +95352,7 @@ case OP_IsType: { /* jump */
/* Opcode: ZeroOrNull P1 P2 P3 * *
** Synopsis: r[P2] = 0 OR NULL
**
-** If all both registers P1 and P3 are NOT NULL, then store a zero in
+** If both registers P1 and P3 are NOT NULL, then store a zero in
** register P2. If either registers P1 or P3 are NULL then put
** a NULL in register P2.
*/
@@ -93799,11 +95706,16 @@ op_column_restart:
pDest->flags = aFlag[t&1];
}
}else{
+ u8 p5;
pDest->enc = encoding;
+ assert( pDest->db==db );
/* This branch happens only when content is on overflow pages */
- if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0
- && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0))
- || (len = sqlite3VdbeSerialTypeLen(t))==0
+ if( ((p5 = (pOp->p5 & OPFLAG_BYTELENARG))!=0
+ && (p5==OPFLAG_TYPEOFARG
+ || (t>=12 && ((t&1)==0 || p5==OPFLAG_BYTELENARG))
+ )
+ )
+ || sqlite3VdbeSerialTypeLen(t)==0
){
/* Content is irrelevant for
** 1. the typeof() function,
@@ -93820,11 +95732,13 @@ op_column_restart:
*/
sqlite3VdbeSerialGet((u8*)sqlite3CtypeMap, t, pDest);
}else{
- if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) goto too_big;
- rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest);
- if( rc!=SQLITE_OK ) goto abort_due_to_error;
- sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
- pDest->flags &= ~MEM_Ephem;
+ rc = vdbeColumnFromOverflow(pC, p2, t, aOffset[p2],
+ p->cacheCtr, colCacheCtr, pDest);
+ if( rc ){
+ if( rc==SQLITE_NOMEM ) goto no_mem;
+ if( rc==SQLITE_TOOBIG ) goto too_big;
+ goto abort_due_to_error;
+ }
}
}
@@ -94286,7 +96200,6 @@ case OP_MakeRecord: {
/* NULL value. No change in zPayload */
}else{
u64 v;
- u32 i;
if( serial_type==7 ){
assert( sizeof(v)==sizeof(pRec->u.r) );
memcpy(&v, &pRec->u.r, sizeof(v));
@@ -94294,12 +96207,17 @@ case OP_MakeRecord: {
}else{
v = pRec->u.i;
}
- len = i = sqlite3SmallTypeSizes[serial_type];
- assert( i>0 );
- while( 1 /*exit-by-break*/ ){
- zPayload[--i] = (u8)(v&0xFF);
- if( i==0 ) break;
- v >>= 8;
+ len = sqlite3SmallTypeSizes[serial_type];
+ assert( len>=1 && len<=8 && len!=5 && len!=7 );
+ switch( len ){
+ default: zPayload[7] = (u8)(v&0xff); v >>= 8;
+ zPayload[6] = (u8)(v&0xff); v >>= 8;
+ case 6: zPayload[5] = (u8)(v&0xff); v >>= 8;
+ zPayload[4] = (u8)(v&0xff); v >>= 8;
+ case 4: zPayload[3] = (u8)(v&0xff); v >>= 8;
+ case 3: zPayload[2] = (u8)(v&0xff); v >>= 8;
+ case 2: zPayload[1] = (u8)(v&0xff); v >>= 8;
+ case 1: zPayload[0] = (u8)(v&0xff);
}
zPayload += len;
}
@@ -95108,7 +97026,7 @@ case OP_OpenEphemeral: { /* ncycle */
}
pCx = p->apCsr[pOp->p1];
if( pCx && !pCx->noReuse && ALWAYS(pOp->p2<=pCx->nField) ){
- /* If the ephermeral table is already open and has no duplicates from
+ /* If the ephemeral table is already open and has no duplicates from
** OP_OpenDup, then erase all existing content so that the table is
** empty again, rather than creating a new table. */
assert( pCx->isEphemeral );
@@ -95599,7 +97517,7 @@ seek_not_found:
** row. If This.P5 is false (0) then a jump is made to SeekGE.P2. If
** This.P5 is true (non-zero) then a jump is made to This.P2. The P5==0
** case occurs when there are no inequality constraints to the right of
-** the IN constraing. The jump to SeekGE.P2 ends the loop. The P5!=0 case
+** the IN constraint. The jump to SeekGE.P2 ends the loop. The P5!=0 case
** occurs when there are inequality constraints to the right of the IN
** operator. In that case, the This.P2 will point either directly to or
** to setup code prior to the OP_IdxGT or OP_IdxGE opcode that checks for
@@ -95607,7 +97525,7 @@ seek_not_found:
**
** Possible outcomes from this opcode:<ol>
**
-** <li> If the cursor is initally not pointed to any valid row, then
+** <li> If the cursor is initially not pointed to any valid row, then
** fall through into the subsequent OP_SeekGE opcode.
**
** <li> If the cursor is left pointing to a row that is before the target
@@ -95839,13 +97757,13 @@ case OP_IfNotOpen: { /* jump */
** operands to OP_NotFound and OP_IdxGT.
**
** This opcode is an optimization attempt only. If this opcode always
-** falls through, the correct answer is still obtained, but extra works
+** falls through, the correct answer is still obtained, but extra work
** is performed.
**
** A value of N in the seekHit flag of cursor P1 means that there exists
** a key P3:N that will match some record in the index. We want to know
** if it is possible for a record P3:P4 to match some record in the
-** index. If it is not possible, we can skips some work. So if seekHit
+** index. If it is not possible, we can skip some work. So if seekHit
** is less than P4, attempt to find out if a match is possible by running
** OP_NotFound.
**
@@ -96357,6 +98275,7 @@ case OP_Insert: {
);
pC->deferredMoveto = 0;
pC->cacheStatus = CACHE_STALE;
+ colCacheCtr++;
/* Invoke the update-hook if required. */
if( rc ) goto abort_due_to_error;
@@ -96410,13 +98329,18 @@ case OP_RowCell: {
** left in an undefined state.
**
** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this
-** delete one of several associated with deleting a table row and all its
-** associated index entries. Exactly one of those deletes is the "primary"
-** delete. The others are all on OPFLAG_FORDELETE cursors or else are
-** marked with the AUXDELETE flag.
+** delete is one of several associated with deleting a table row and
+** all its associated index entries. Exactly one of those deletes is
+** the "primary" delete. The others are all on OPFLAG_FORDELETE
+** cursors or else are marked with the AUXDELETE flag.
+**
+** If the OPFLAG_NCHANGE (0x01) flag of P2 (NB: P2 not P5) is set, then
+** the row change count is incremented (otherwise not).
**
-** If the OPFLAG_NCHANGE flag of P2 (NB: P2 not P5) is set, then the row
-** change count is incremented (otherwise not).
+** If the OPFLAG_ISNOOP (0x40) flag of P2 (not P5!) is set, then the
+** pre-update-hook for deletes is run, but the btree is otherwise unchanged.
+** This happens when the OP_Delete is to be shortly followed by an OP_Insert
+** with the same key, causing the btree entry to be overwritten.
**
** P1 must not be pseudo-table. It has to be a real table with
** multiple rows.
@@ -96517,6 +98441,7 @@ case OP_Delete: {
rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5);
pC->cacheStatus = CACHE_STALE;
+ colCacheCtr++;
pC->seekResult = 0;
if( rc ) goto abort_due_to_error;
@@ -96584,13 +98509,13 @@ case OP_SorterCompare: {
** Write into register P2 the current sorter data for sorter cursor P1.
** Then clear the column header cache on cursor P3.
**
-** This opcode is normally use to move a record out of the sorter and into
+** This opcode is normally used to move a record out of the sorter and into
** a register that is the source for a pseudo-table cursor created using
** OpenPseudo. That pseudo-table cursor is the one that is identified by
** parameter P3. Clearing the P3 column cache as part of this opcode saves
** us from having to issue a separate NullRow instruction to clear that cache.
*/
-case OP_SorterData: {
+case OP_SorterData: { /* ncycle */
VdbeCursor *pC;
pOut = &aMem[pOp->p2];
@@ -96865,8 +98790,8 @@ case OP_IfSmaller: { /* jump */
** regression tests can determine whether or not the optimizer is
** correctly optimizing out sorts.
*/
-case OP_SorterSort: /* jump */
-case OP_Sort: { /* jump */
+case OP_SorterSort: /* jump ncycle */
+case OP_Sort: { /* jump ncycle */
#ifdef SQLITE_TEST
sqlite3_sort_count++;
sqlite3_search_count--;
@@ -97393,7 +99318,7 @@ case OP_IdxGE: { /* jump, ncycle */
** file is given by P1.
**
** The table being destroyed is in the main database file if P3==0. If
-** P3==1 then the table to be clear is in the auxiliary database file
+** P3==1 then the table to be destroyed is in the auxiliary database file
** that is used to store tables create using CREATE TEMPORARY TABLE.
**
** If AUTOVACUUM is enabled then it is possible that another root page
@@ -97453,8 +99378,8 @@ case OP_Destroy: { /* out2 */
** in the database file is given by P1. But, unlike Destroy, do not
** remove the table or index from the database file.
**
-** The table being clear is in the main database file if P2==0. If
-** P2==1 then the table to be clear is in the auxiliary database file
+** The table being cleared is in the main database file if P2==0. If
+** P2==1 then the table to be cleared is in the auxiliary database file
** that is used to store tables create using CREATE TEMPORARY TABLE.
**
** If the P3 value is non-zero, then the row change count is incremented
@@ -97540,13 +99465,41 @@ case OP_CreateBtree: { /* out2 */
/* Opcode: SqlExec * * * P4 *
**
** Run the SQL statement or statements specified in the P4 string.
+** Disable Auth and Trace callbacks while those statements are running if
+** P1 is true.
*/
case OP_SqlExec: {
+ char *zErr;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth;
+#endif
+ u8 mTrace;
+
sqlite3VdbeIncrWriteCounter(p, 0);
db->nSqlExec++;
- rc = sqlite3_exec(db, pOp->p4.z, 0, 0, 0);
+ zErr = 0;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ xAuth = db->xAuth;
+#endif
+ mTrace = db->mTrace;
+ if( pOp->p1 ){
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = 0;
+#endif
+ db->mTrace = 0;
+ }
+ rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr);
db->nSqlExec--;
- if( rc ) goto abort_due_to_error;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ db->xAuth = xAuth;
+#endif
+ db->mTrace = mTrace;
+ if( zErr || rc ){
+ sqlite3VdbeError(p, "%s", zErr);
+ sqlite3_free(zErr);
+ if( rc==SQLITE_NOMEM ) goto no_mem;
+ goto abort_due_to_error;
+ }
break;
}
@@ -98280,7 +100233,7 @@ case OP_AggStep1: {
/* If this function is inside of a trigger, the register array in aMem[]
** might change from one evaluation to the next. The next block of code
** checks to see if the register array has changed, and if so it
- ** reinitializes the relavant parts of the sqlite3_context object */
+ ** reinitializes the relevant parts of the sqlite3_context object */
if( pCtx->pMem != pMem ){
pCtx->pMem = pMem;
for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
@@ -98768,6 +100721,52 @@ case OP_VOpen: { /* ncycle */
#endif /* SQLITE_OMIT_VIRTUALTABLE */
#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VCheck P1 P2 P3 P4 *
+**
+** P4 is a pointer to a Table object that is a virtual table in schema P1
+** that supports the xIntegrity() method. This opcode runs the xIntegrity()
+** method for that virtual table, using P3 as the integer argument. If
+** an error is reported back, the table name is prepended to the error
+** message and that message is stored in P2. If no errors are seen,
+** register P2 is set to NULL.
+*/
+case OP_VCheck: { /* out2 */
+ Table *pTab;
+ sqlite3_vtab *pVtab;
+ const sqlite3_module *pModule;
+ char *zErr = 0;
+
+ pOut = &aMem[pOp->p2];
+ sqlite3VdbeMemSetNull(pOut); /* Innocent until proven guilty */
+ assert( pOp->p4type==P4_TABLEREF );
+ pTab = pOp->p4.pTab;
+ assert( pTab!=0 );
+ assert( pTab->nTabRef>0 );
+ assert( IsVirtual(pTab) );
+ if( pTab->u.vtab.p==0 ) break;
+ pVtab = pTab->u.vtab.p->pVtab;
+ assert( pVtab!=0 );
+ pModule = pVtab->pModule;
+ assert( pModule!=0 );
+ assert( pModule->iVersion>=4 );
+ assert( pModule->xIntegrity!=0 );
+ sqlite3VtabLock(pTab->u.vtab.p);
+ assert( pOp->p1>=0 && pOp->p1<db->nDb );
+ rc = pModule->xIntegrity(pVtab, db->aDb[pOp->p1].zDbSName, pTab->zName,
+ pOp->p3, &zErr);
+ sqlite3VtabUnlock(pTab->u.vtab.p);
+ if( rc ){
+ sqlite3_free(zErr);
+ goto abort_due_to_error;
+ }
+ if( zErr ){
+ sqlite3VdbeMemSetStr(pOut, zErr, -1, SQLITE_UTF8, sqlite3_free);
+ }
+ break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
/* Opcode: VInitIn P1 P2 P3 * *
** Synopsis: r[P2]=ValueList(P1,P3)
**
@@ -98880,6 +100879,7 @@ case OP_VColumn: { /* ncycle */
const sqlite3_module *pModule;
Mem *pDest;
sqlite3_context sContext;
+ FuncDef nullFunc;
VdbeCursor *pCur = p->apCsr[pOp->p1];
assert( pCur!=0 );
@@ -98897,6 +100897,9 @@ case OP_VColumn: { /* ncycle */
memset(&sContext, 0, sizeof(sContext));
sContext.pOut = pDest;
sContext.enc = encoding;
+ nullFunc.pUserData = 0;
+ nullFunc.funcFlags = SQLITE_RESULT_SUBTYPE;
+ sContext.pFunc = &nullFunc;
assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 );
if( pOp->p5 & OPFLAG_NOCHNG ){
sqlite3VdbeMemSetNull(pDest);
@@ -99158,7 +101161,7 @@ case OP_MaxPgcnt: { /* out2 */
** This opcode works exactly like OP_Function. The only difference is in
** its name. This opcode is used in places where the function must be
** purely non-deterministic. Some built-in date/time functions can be
-** either determinitic of non-deterministic, depending on their arguments.
+** either deterministic of non-deterministic, depending on their arguments.
** When those function are used in a non-deterministic way, they will check
** to see if they were called using OP_PureFunc instead of OP_Function, and
** if they were, they throw an error.
@@ -99176,7 +101179,7 @@ case OP_Function: { /* group */
/* If this function is inside of a trigger, the register array in aMem[]
** might change from one evaluation to the next. The next block of code
** checks to see if the register array has changed, and if so it
- ** reinitializes the relavant parts of the sqlite3_context object */
+ ** reinitializes the relevant parts of the sqlite3_context object */
pOut = &aMem[pOp->p3];
if( pCtx->pOut != pOut ){
pCtx->pVdbe = p;
@@ -99229,6 +101232,42 @@ case OP_ClrSubtype: { /* in1 */
break;
}
+/* Opcode: GetSubtype P1 P2 * * *
+** Synopsis: r[P2] = r[P1].subtype
+**
+** Extract the subtype value from register P1 and write that subtype
+** into register P2. If P1 has no subtype, then P1 gets a NULL.
+*/
+case OP_GetSubtype: { /* in1 out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( pIn1->flags & MEM_Subtype ){
+ sqlite3VdbeMemSetInt64(pOut, pIn1->eSubtype);
+ }else{
+ sqlite3VdbeMemSetNull(pOut);
+ }
+ break;
+}
+
+/* Opcode: SetSubtype P1 P2 * * *
+** Synopsis: r[P2].subtype = r[P1]
+**
+** Set the subtype value of register P2 to the integer from register P1.
+** If P1 is NULL, clear the subtype from p2.
+*/
+case OP_SetSubtype: { /* in1 out2 */
+ pIn1 = &aMem[pOp->p1];
+ pOut = &aMem[pOp->p2];
+ if( pIn1->flags & MEM_Null ){
+ pOut->flags &= ~MEM_Subtype;
+ }else{
+ assert( pIn1->flags & MEM_Int );
+ pOut->flags |= MEM_Subtype;
+ pOut->eSubtype = (u8)(pIn1->u.i & 0xff);
+ }
+ break;
+}
+
/* Opcode: FilterAdd P1 * P3 P4 *
** Synopsis: filter(P1) += key(P3@P4)
**
@@ -99252,7 +101291,7 @@ case OP_FilterAdd: {
printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n));
}
#endif
- h %= pIn1->n;
+ h %= (pIn1->n*8);
pIn1->z[h/8] |= 1<<(h&7);
break;
}
@@ -99288,7 +101327,7 @@ case OP_Filter: { /* jump */
printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n));
}
#endif
- h %= pIn1->n;
+ h %= (pIn1->n*8);
if( (pIn1->z[h/8] & (1<<(h&7)))==0 ){
VdbeBranchTaken(1, 2);
p->aCounter[SQLITE_STMTSTATUS_FILTER_HIT]++;
@@ -99540,7 +101579,7 @@ default: { /* This is really OP_Noop, OP_Explain */
}
if( opProperty==0xff ){
/* Never happens. This code exists to avoid a harmless linkage
- ** warning aboud sqlite3VdbeRegisterDump() being defined but not
+ ** warning about sqlite3VdbeRegisterDump() being defined but not
** used. */
sqlite3VdbeRegisterDump(p);
}
@@ -99713,8 +101752,7 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
/* Set the value of register r[1] in the SQL statement to integer iRow.
** This is done directly as a performance optimization
*/
- v->aMem[1].flags = MEM_Int;
- v->aMem[1].u.i = iRow;
+ sqlite3VdbeMemSetInt64(&v->aMem[1], iRow);
/* If the statement has been run before (and is paused at the OP_ResultRow)
** then back it up to the point where it does the OP_NotExists. This could
@@ -99797,7 +101835,7 @@ SQLITE_API int sqlite3_blob_open(
#endif
*ppBlob = 0;
#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || zTable==0 ){
+ if( !sqlite3SafetyCheckOk(db) || zTable==0 || zColumn==0 ){
return SQLITE_MISUSE_BKPT;
}
#endif
@@ -100258,7 +102296,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
** The threshold for the amount of main memory to use before flushing
** records to a PMA is roughly the same as the limit configured for the
** page-cache of the main database. Specifically, the threshold is set to
-** the value returned by "PRAGMA main.page_size" multipled by
+** the value returned by "PRAGMA main.page_size" multiplied by
** that returned by "PRAGMA main.cache_size", in bytes.
**
** If the sorter is running in single-threaded mode, then all PMAs generated
@@ -100281,7 +102319,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
**
** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the
** sorter is running in single-threaded mode, then these PMAs are merged
-** incrementally as keys are retreived from the sorter by the VDBE. The
+** incrementally as keys are retrieved from the sorter by the VDBE. The
** MergeEngine object, described in further detail below, performs this
** merge.
**
@@ -100359,7 +102397,7 @@ struct SorterFile {
struct SorterList {
SorterRecord *pList; /* Linked list of records */
u8 *aMemory; /* If non-NULL, bulk memory to hold pList */
- int szPMA; /* Size of pList as PMA in bytes */
+ i64 szPMA; /* Size of pList as PMA in bytes */
};
/*
@@ -100444,7 +102482,7 @@ struct MergeEngine {
**
** Essentially, this structure contains all those fields of the VdbeSorter
** structure for which each thread requires a separate instance. For example,
-** each thread requries its own UnpackedRecord object to unpack records in
+** each thread requeries its own UnpackedRecord object to unpack records in
** as part of comparison operations.
**
** Before a background thread is launched, variable bDone is set to 0. Then,
@@ -100468,10 +102506,10 @@ typedef int (*SorterCompare)(SortSubtask*,int*,const void*,int,const void*,int);
struct SortSubtask {
SQLiteThread *pThread; /* Background thread, if any */
int bDone; /* Set if thread is finished but not joined */
+ int nPMA; /* Number of PMAs currently in file */
VdbeSorter *pSorter; /* Sorter that owns this sub-task */
UnpackedRecord *pUnpacked; /* Space to unpack a record */
SorterList list; /* List for thread to write to a PMA */
- int nPMA; /* Number of PMAs currently in file */
SorterCompare xCompare; /* Compare function to use */
SorterFile file; /* Temp file for level-0 PMAs */
SorterFile file2; /* Space for other PMAs */
@@ -100516,7 +102554,7 @@ struct VdbeSorter {
** PMA, in sorted order. The next key to be read is cached in nKey/aKey.
** aKey might point into aMap or into aBuffer. If neither of those locations
** contain a contiguous representation of the key, then aAlloc is allocated
-** and the key is copied into aAlloc and aKey is made to poitn to aAlloc.
+** and the key is copied into aAlloc and aKey is made to point to aAlloc.
**
** pFd==0 at EOF.
*/
@@ -101887,7 +103925,7 @@ static int vdbeSorterFlushPMA(VdbeSorter *pSorter){
** the background thread from a sub-tasks previous turn is still running,
** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy,
** fall back to using the final sub-task. The first (pSorter->nTask-1)
- ** sub-tasks are prefered as they use background threads - the final
+ ** sub-tasks are preferred as they use background threads - the final
** sub-task uses the main thread. */
for(i=0; i<nWorker; i++){
int iTest = (pSorter->iPrev + i + 1) % nWorker;
@@ -101945,8 +103983,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterWrite(
int rc = SQLITE_OK; /* Return Code */
SorterRecord *pNew; /* New list element */
int bFlush; /* True to flush contents of memory to PMA */
- int nReq; /* Bytes of memory required */
- int nPMA; /* Bytes of PMA space required */
+ i64 nReq; /* Bytes of memory required */
+ i64 nPMA; /* Bytes of PMA space required */
int t; /* serial type of first record field */
assert( pCsr->eCurType==CURTYPE_SORTER );
@@ -102371,7 +104409,7 @@ static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){
rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode);
- /* Set up the required files for pIncr. A multi-theaded IncrMerge object
+ /* Set up the required files for pIncr. A multi-threaded IncrMerge object
** requires two temp files to itself, whereas a single-threaded object
** only requires a region of pTask->file2. */
if( rc==SQLITE_OK ){
@@ -103011,6 +105049,8 @@ static int bytecodevtabConnect(
"p5 INT,"
"comment TEXT,"
"subprog TEXT,"
+ "nexec INT,"
+ "ncycle INT,"
"stmt HIDDEN"
");",
@@ -103173,7 +105213,7 @@ static int bytecodevtabColumn(
}
}
}
- i += 10;
+ i += 20;
}
}
switch( i ){
@@ -103223,16 +105263,31 @@ static int bytecodevtabColumn(
}
break;
}
- case 10: /* tables_used.type */
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ case 9: /* nexec */
+ sqlite3_result_int(ctx, pOp->nExec);
+ break;
+ case 10: /* ncycle */
+ sqlite3_result_int(ctx, pOp->nCycle);
+ break;
+#else
+ case 9: /* nexec */
+ case 10: /* ncycle */
+ sqlite3_result_int(ctx, 0);
+ break;
+#endif
+
+ case 20: /* tables_used.type */
sqlite3_result_text(ctx, pCur->zType, -1, SQLITE_STATIC);
break;
- case 11: /* tables_used.schema */
+ case 21: /* tables_used.schema */
sqlite3_result_text(ctx, pCur->zSchema, -1, SQLITE_STATIC);
break;
- case 12: /* tables_used.name */
+ case 22: /* tables_used.name */
sqlite3_result_text(ctx, pCur->zName, -1, SQLITE_STATIC);
break;
- case 13: /* tables_used.wr */
+ case 23: /* tables_used.wr */
sqlite3_result_int(ctx, pOp->opcode==OP_OpenWrite);
break;
}
@@ -103306,7 +105361,7 @@ static int bytecodevtabBestIndex(
int rc = SQLITE_CONSTRAINT;
struct sqlite3_index_constraint *p;
bytecodevtab *pVTab = (bytecodevtab*)tab;
- int iBaseCol = pVTab->bTablesUsed ? 4 : 8;
+ int iBaseCol = pVTab->bTablesUsed ? 4 : 10;
pIdxInfo->estimatedCost = (double)100;
pIdxInfo->estimatedRows = 100;
pIdxInfo->idxNum = 0;
@@ -103353,7 +105408,8 @@ static sqlite3_module bytecodevtabModule = {
/* xSavepoint */ 0,
/* xRelease */ 0,
/* xRollbackTo */ 0,
- /* xShadowName */ 0
+ /* xShadowName */ 0,
+ /* xIntegrity */ 0
};
@@ -103877,7 +105933,7 @@ static int walkWindowList(Walker *pWalker, Window *pList, int bOneOnly){
** The return value from this routine is WRC_Abort to abandon the tree walk
** and WRC_Continue to continue.
*/
-static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3WalkExprNN(Walker *pWalker, Expr *pExpr){
int rc;
testcase( ExprHasProperty(pExpr, EP_TokenOnly) );
testcase( ExprHasProperty(pExpr, EP_Reduced) );
@@ -103886,7 +105942,9 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
if( rc ) return rc & WRC_Abort;
if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){
assert( pExpr->x.pList==0 || pExpr->pRight==0 );
- if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort;
+ if( pExpr->pLeft && sqlite3WalkExprNN(pWalker, pExpr->pLeft) ){
+ return WRC_Abort;
+ }
if( pExpr->pRight ){
assert( !ExprHasProperty(pExpr, EP_WinFunc) );
pExpr = pExpr->pRight;
@@ -103910,7 +105968,7 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
return WRC_Continue;
}
SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){
- return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue;
+ return pExpr ? sqlite3WalkExprNN(pWalker,pExpr) : WRC_Continue;
}
/*
@@ -104036,7 +106094,7 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
}
/* Increase the walkerDepth when entering a subquery, and
-** descrease when leaving the subquery.
+** decrease when leaving the subquery.
*/
SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker *pWalker, Select *pSelect){
UNUSED_PARAMETER(pSelect);
@@ -104180,21 +106238,36 @@ static void resolveAlias(
}
/*
-** Subqueries stores the original database, table and column names for their
-** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN".
-** Check to see if the zSpan given to this routine matches the zDb, zTab,
-** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will
-** match anything.
+** Subqueries store the original database, table and column names for their
+** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN",
+** and mark the expression-list item by setting ExprList.a[].fg.eEName
+** to ENAME_TAB.
+**
+** Check to see if the zSpan/eEName of the expression-list item passed to this
+** routine matches the zDb, zTab, and zCol. If any of zDb, zTab, and zCol are
+** NULL then those fields will match anything. Return true if there is a match,
+** or false otherwise.
+**
+** SF_NestedFrom subqueries also store an entry for the implicit rowid (or
+** _rowid_, or oid) column by setting ExprList.a[].fg.eEName to ENAME_ROWID,
+** and setting zSpan to "DATABASE.TABLE.<rowid-alias>". This type of pItem
+** argument matches if zCol is a rowid alias. If it is not NULL, (*pbRowid)
+** is set to 1 if there is this kind of match.
*/
SQLITE_PRIVATE int sqlite3MatchEName(
const struct ExprList_item *pItem,
const char *zCol,
const char *zTab,
- const char *zDb
+ const char *zDb,
+ int *pbRowid
){
int n;
const char *zSpan;
- if( pItem->fg.eEName!=ENAME_TAB ) return 0;
+ int eEName = pItem->fg.eEName;
+ if( eEName!=ENAME_TAB && (eEName!=ENAME_ROWID || NEVER(pbRowid==0)) ){
+ return 0;
+ }
+ assert( pbRowid==0 || *pbRowid==0 );
zSpan = pItem->zEName;
for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){}
if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){
@@ -104206,9 +106279,11 @@ SQLITE_PRIVATE int sqlite3MatchEName(
return 0;
}
zSpan += n+1;
- if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){
- return 0;
+ if( zCol ){
+ if( eEName==ENAME_TAB && sqlite3StrICmp(zSpan, zCol)!=0 ) return 0;
+ if( eEName==ENAME_ROWID && sqlite3IsRowid(zCol)==0 ) return 0;
}
+ if( eEName==ENAME_ROWID ) *pbRowid = 1;
return 1;
}
@@ -104241,6 +106316,7 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){
assert( ExprUseYTab(pExpr) );
pExTab = pExpr->y.pTab;
assert( pExTab!=0 );
+ assert( n < pExTab->nCol );
if( (pExTab->tabFlags & TF_HasGenerated)!=0
&& (pExTab->aCol[n].colFlags & COLFLAG_GENERATED)!=0
){
@@ -104341,7 +106417,7 @@ static int lookupName(
){
int i, j; /* Loop counters */
int cnt = 0; /* Number of matching column names */
- int cntTab = 0; /* Number of matching table names */
+ int cntTab = 0; /* Number of potential "rowid" matches */
int nSubquery = 0; /* How many levels of subquery */
sqlite3 *db = pParse->db; /* The database connection */
SrcItem *pItem; /* Use for looping over pSrcList items */
@@ -104418,39 +106494,49 @@ static int lookupName(
assert( pEList!=0 );
assert( pEList->nExpr==pTab->nCol );
for(j=0; j<pEList->nExpr; j++){
- if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){
+ int bRowid = 0; /* True if possible rowid match */
+ if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb, &bRowid) ){
continue;
}
- if( cnt>0 ){
- if( pItem->fg.isUsing==0
- || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
- ){
- /* Two or more tables have the same column name which is
- ** not joined by USING. This is an error. Signal as much
- ** by clearing pFJMatch and letting cnt go above 1. */
- sqlite3ExprListDelete(db, pFJMatch);
- pFJMatch = 0;
- }else
- if( (pItem->fg.jointype & JT_RIGHT)==0 ){
- /* An INNER or LEFT JOIN. Use the left-most table */
- continue;
- }else
- if( (pItem->fg.jointype & JT_LEFT)==0 ){
- /* A RIGHT JOIN. Use the right-most table */
- cnt = 0;
- sqlite3ExprListDelete(db, pFJMatch);
- pFJMatch = 0;
- }else{
- /* For a FULL JOIN, we must construct a coalesce() func */
- extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ if( bRowid==0 ){
+ if( cnt>0 ){
+ if( pItem->fg.isUsing==0
+ || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
+ ){
+ /* Two or more tables have the same column name which is
+ ** not joined by USING. This is an error. Signal as much
+ ** by clearing pFJMatch and letting cnt go above 1. */
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else
+ if( (pItem->fg.jointype & JT_RIGHT)==0 ){
+ /* An INNER or LEFT JOIN. Use the left-most table */
+ continue;
+ }else
+ if( (pItem->fg.jointype & JT_LEFT)==0 ){
+ /* A RIGHT JOIN. Use the right-most table */
+ cnt = 0;
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else{
+ /* For a FULL JOIN, we must construct a coalesce() func */
+ extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ }
}
+ cnt++;
+ hit = 1;
+ }else if( cnt>0 ){
+ /* This is a potential rowid match, but there has already been
+ ** a real match found. So this can be ignored. */
+ continue;
}
- cnt++;
- cntTab = 2;
+ cntTab++;
pMatch = pItem;
pExpr->iColumn = j;
pEList->a[j].fg.bUsed = 1;
- hit = 1;
+
+ /* rowid cannot be part of a USING clause - assert() this. */
+ assert( bRowid==0 || pEList->a[j].fg.bUsingTerm==0 );
if( pEList->a[j].fg.bUsingTerm ) break;
}
if( hit || zTab==0 ) continue;
@@ -104645,10 +106731,10 @@ static int lookupName(
&& pMatch
&& (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0
&& sqlite3IsRowid(zCol)
- && ALWAYS(VisibleRowid(pMatch->pTab))
+ && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom)
){
cnt = 1;
- pExpr->iColumn = -1;
+ if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1;
pExpr->affExpr = SQLITE_AFF_INTEGER;
}
@@ -104807,6 +106893,7 @@ static int lookupName(
sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr);
pParse->checkSchema = 1;
pTopNC->nNcErr++;
+ eNewExprOp = TK_NULL;
}
assert( pFJMatch==0 );
@@ -104833,7 +106920,7 @@ static int lookupName(
** If a generated column is referenced, set bits for every column
** of the table.
*/
- if( pExpr->iColumn>=0 && pMatch!=0 ){
+ if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){
pMatch->colUsed |= sqlite3ExprColUsed(pExpr);
}
@@ -105101,6 +107188,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
Window *pWin = (IsWindowFunc(pExpr) ? pExpr->y.pWin : 0);
#endif
assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) );
+ assert( pExpr->pLeft==0 || pExpr->pLeft->op==TK_ORDER );
zId = pExpr->u.zToken;
pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0);
if( pDef==0 ){
@@ -105242,6 +107330,10 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
pNC->nNcErr++;
}
#endif
+ else if( is_agg==0 && pExpr->pLeft ){
+ sqlite3ExprOrderByAggregateError(pParse, pExpr);
+ pNC->nNcErr++;
+ }
if( is_agg ){
/* Window functions may not be arguments of aggregate functions.
** Or arguments of other window functions. But aggregate functions
@@ -105260,6 +107352,11 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
#endif
sqlite3WalkExprList(pWalker, pList);
if( is_agg ){
+ if( pExpr->pLeft ){
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ sqlite3WalkExprList(pWalker, pExpr->pLeft->x.pList);
+ }
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pWin ){
Select *pSel = pNC->pWinSelect;
@@ -105288,11 +107385,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
while( pNC2
&& sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0
){
- pExpr->op2++;
+ pExpr->op2 += (1 + pNC2->nNestedSelect);
pNC2 = pNC2->pNext;
}
assert( pDef!=0 || IN_RENAME_OBJECT );
if( pNC2 && pDef ){
+ pExpr->op2 += pNC2->nNestedSelect;
assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg );
testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
@@ -105770,7 +107868,7 @@ static int resolveOrderGroupBy(
}
for(j=0; j<pSelect->pEList->nExpr; j++){
if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){
- /* Since this expresion is being changed into a reference
+ /* Since this expression is being changed into a reference
** to an identical expression in the result set, remove all Window
** objects belonging to the expression from the Select.pWin list. */
windowRemoveExprFromSelect(pSelect, pE);
@@ -105823,10 +107921,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
while( p ){
assert( (p->selFlags & SF_Expanded)!=0 );
assert( (p->selFlags & SF_Resolved)==0 );
- assert( db->suppressErr==0 ); /* SF_Resolved not set if errors suppressed */
p->selFlags |= SF_Resolved;
-
/* Resolve the expressions in the LIMIT and OFFSET clauses. These
** are not allowed to refer to any names, so pass an empty NameContext.
*/
@@ -105853,6 +107949,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
/* Recursively resolve names in all subqueries in the FROM clause
*/
+ if( pOuterNC ) pOuterNC->nNestedSelect++;
for(i=0; i<p->pSrc->nSrc; i++){
SrcItem *pItem = &p->pSrc->a[i];
if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
@@ -105877,6 +107974,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
}
}
}
+ if( pOuterNC && ALWAYS(pOuterNC->nNestedSelect>0) ){
+ pOuterNC->nNestedSelect--;
+ }
/* Set up the local name-context to pass to sqlite3ResolveExprNames() to
** resolve the result-set expression list.
@@ -106093,7 +108193,8 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames(
return SQLITE_ERROR;
}
#endif
- sqlite3WalkExpr(&w, pExpr);
+ assert( pExpr!=0 );
+ sqlite3WalkExprNN(&w, pExpr);
#if SQLITE_MAX_EXPR_DEPTH>0
w.pParse->nHeight -= pExpr->nHeight;
#endif
@@ -106135,7 +108236,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames(
return WRC_Abort;
}
#endif
- sqlite3WalkExpr(&w, pExpr);
+ sqlite3WalkExprNN(&w, pExpr);
#if SQLITE_MAX_EXPR_DEPTH>0
w.pParse->nHeight -= pExpr->nHeight;
#endif
@@ -106157,7 +108258,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames(
/*
** Resolve all names in all expressions of a SELECT and in all
-** decendents of the SELECT, including compounds off of p->pPrior,
+** descendants of the SELECT, including compounds off of p->pPrior,
** subqueries in expressions, and subqueries used as FROM clause
** terms.
**
@@ -106307,6 +108408,7 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
if( op==TK_SELECT_COLUMN ){
assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) );
assert( pExpr->iColumn < pExpr->iTable );
+ assert( pExpr->iColumn >= 0 );
assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr );
return sqlite3ExprAffinity(
pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
@@ -106543,7 +108645,7 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){
/*
** Return the collation sequence for the expression pExpr. If
** there is no defined collating sequence, return a pointer to the
-** defautl collation sequence.
+** default collation sequence.
**
** See also: sqlite3ExprCollSeq()
**
@@ -106673,7 +108775,7 @@ SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(
return pColl;
}
-/* Expresssion p is a comparison operator. Return a collation sequence
+/* Expression p is a comparison operator. Return a collation sequence
** appropriate for the comparison operator.
**
** This is normally just a wrapper around sqlite3BinaryCompareCollSeq().
@@ -106830,6 +108932,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(
*/
pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0);
if( pRet ){
+ ExprSetProperty(pRet, EP_FullSize);
pRet->iTable = nField;
pRet->iColumn = iField;
pRet->pLeft = pVector;
@@ -107130,6 +109233,15 @@ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
#endif /* SQLITE_MAX_EXPR_DEPTH>0 */
/*
+** Set the error offset for an Expr node, if possible.
+*/
+SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){
+ if( pExpr==0 ) return;
+ if( NEVER(ExprUseWJoin(pExpr)) ) return;
+ pExpr->w.iOfst = iOfst;
+}
+
+/*
** This routine is the core allocator for Expr nodes.
**
** Construct a new expression node and return a pointer to it. Memory
@@ -107412,6 +109524,67 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(
}
/*
+** Report an error when attempting to use an ORDER BY clause within
+** the arguments of a non-aggregate function.
+*/
+SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse *pParse, Expr *p){
+ sqlite3ErrorMsg(pParse,
+ "ORDER BY may not be used with non-aggregate %#T()", p
+ );
+}
+
+/*
+** Attach an ORDER BY clause to a function call.
+**
+** functionname( arguments ORDER BY sortlist )
+** \_____________________/ \______/
+** pExpr pOrderBy
+**
+** The ORDER BY clause is inserted into a new Expr node of type TK_ORDER
+** and added to the Expr.pLeft field of the parent TK_FUNCTION node.
+*/
+SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(
+ Parse *pParse, /* Parsing context */
+ Expr *pExpr, /* The function call to which ORDER BY is to be added */
+ ExprList *pOrderBy /* The ORDER BY clause to add */
+){
+ Expr *pOB;
+ sqlite3 *db = pParse->db;
+ if( NEVER(pOrderBy==0) ){
+ assert( db->mallocFailed );
+ return;
+ }
+ if( pExpr==0 ){
+ assert( db->mallocFailed );
+ sqlite3ExprListDelete(db, pOrderBy);
+ return;
+ }
+ assert( pExpr->op==TK_FUNCTION );
+ assert( pExpr->pLeft==0 );
+ assert( ExprUseXList(pExpr) );
+ if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){
+ /* Ignore ORDER BY on zero-argument aggregates */
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pOrderBy);
+ return;
+ }
+ if( IsWindowFunc(pExpr) ){
+ sqlite3ExprOrderByAggregateError(pParse, pExpr);
+ sqlite3ExprListDelete(db, pOrderBy);
+ return;
+ }
+
+ pOB = sqlite3ExprAlloc(db, TK_ORDER, 0, 0);
+ if( pOB==0 ){
+ sqlite3ExprListDelete(db, pOrderBy);
+ return;
+ }
+ pOB->x.pList = pOrderBy;
+ assert( ExprUseXList(pOB) );
+ pExpr->pLeft = pOB;
+ ExprSetProperty(pOB, EP_FullSize);
+}
+
+/*
** Check to see if a function is usable according to current access
** rules:
**
@@ -107572,6 +109745,9 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){
if( p ) sqlite3ExprDeleteNN(db, p);
}
+SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3 *db, void *p){
+ if( ALWAYS(p) ) sqlite3ExprDeleteNN(db, (Expr*)p);
+}
/*
** Clear both elements of an OnOrUsing object
@@ -107589,7 +109765,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
/*
** Arrange to cause pExpr to be deleted when the pParse is deleted.
** This is similar to sqlite3ExprDelete() except that the delete is
-** deferred untilthe pParse is deleted.
+** deferred until the pParse is deleted.
**
** The pExpr might be deleted immediately on an OOM error.
**
@@ -107597,9 +109773,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
** pExpr to the pParse->pConstExpr list with a register number of 0.
*/
SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprDelete,
- pExpr);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr);
}
/* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
@@ -107664,11 +109838,7 @@ static int dupedExprStructSize(const Expr *p, int flags){
assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */
assert( EXPR_FULLSIZE<=0xfff );
assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 );
- if( 0==flags || p->op==TK_SELECT_COLUMN
-#ifndef SQLITE_OMIT_WINDOWFUNC
- || ExprHasProperty(p, EP_WinFunc)
-#endif
- ){
+ if( 0==flags || ExprHasProperty(p, EP_FullSize) ){
nSize = EXPR_FULLSIZE;
}else{
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
@@ -107699,56 +109869,93 @@ static int dupedExprNodeSize(const Expr *p, int flags){
/*
** Return the number of bytes required to create a duplicate of the
-** expression passed as the first argument. The second argument is a
-** mask containing EXPRDUP_XXX flags.
+** expression passed as the first argument.
**
** The value returned includes space to create a copy of the Expr struct
** itself and the buffer referred to by Expr.u.zToken, if any.
**
-** If the EXPRDUP_REDUCE flag is set, then the return value includes
-** space to duplicate all Expr nodes in the tree formed by Expr.pLeft
-** and Expr.pRight variables (but not for any structures pointed to or
-** descended from the Expr.x.pList or Expr.x.pSelect variables).
+** The return value includes space to duplicate all Expr nodes in the
+** tree formed by Expr.pLeft and Expr.pRight, but not any other
+** substructure such as Expr.x.pList, Expr.x.pSelect, and Expr.y.pWin.
*/
-static int dupedExprSize(const Expr *p, int flags){
- int nByte = 0;
- if( p ){
- nByte = dupedExprNodeSize(p, flags);
- if( flags&EXPRDUP_REDUCE ){
- nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags);
- }
- }
+static int dupedExprSize(const Expr *p){
+ int nByte;
+ assert( p!=0 );
+ nByte = dupedExprNodeSize(p, EXPRDUP_REDUCE);
+ if( p->pLeft ) nByte += dupedExprSize(p->pLeft);
+ if( p->pRight ) nByte += dupedExprSize(p->pRight);
+ assert( nByte==ROUND8(nByte) );
return nByte;
}
/*
-** This function is similar to sqlite3ExprDup(), except that if pzBuffer
-** is not NULL then *pzBuffer is assumed to point to a buffer large enough
-** to store the copy of expression p, the copies of p->u.zToken
-** (if applicable), and the copies of the p->pLeft and p->pRight expressions,
-** if any. Before returning, *pzBuffer is set to the first byte past the
-** portion of the buffer copied into by this function.
+** An EdupBuf is a memory allocation used to stored multiple Expr objects
+** together with their Expr.zToken content. This is used to help implement
+** compression while doing sqlite3ExprDup(). The top-level Expr does the
+** allocation for itself and many of its decendents, then passes an instance
+** of the structure down into exprDup() so that they decendents can have
+** access to that memory.
+*/
+typedef struct EdupBuf EdupBuf;
+struct EdupBuf {
+ u8 *zAlloc; /* Memory space available for storage */
+#ifdef SQLITE_DEBUG
+ u8 *zEnd; /* First byte past the end of memory */
+#endif
+};
+
+/*
+** This function is similar to sqlite3ExprDup(), except that if pEdupBuf
+** is not NULL then it points to memory that can be used to store a copy
+** of the input Expr p together with its p->u.zToken (if any). pEdupBuf
+** is updated with the new buffer tail prior to returning.
*/
-static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
+static Expr *exprDup(
+ sqlite3 *db, /* Database connection (for memory allocation) */
+ const Expr *p, /* Expr tree to be duplicated */
+ int dupFlags, /* EXPRDUP_REDUCE for compression. 0 if not */
+ EdupBuf *pEdupBuf /* Preallocated storage space, or NULL */
+){
Expr *pNew; /* Value to return */
- u8 *zAlloc; /* Memory space from which to build Expr object */
+ EdupBuf sEdupBuf; /* Memory space from which to build Expr object */
u32 staticFlag; /* EP_Static if space not obtained from malloc */
+ int nToken = -1; /* Space needed for p->u.zToken. -1 means unknown */
assert( db!=0 );
assert( p );
assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE );
- assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE );
+ assert( pEdupBuf==0 || dupFlags==EXPRDUP_REDUCE );
/* Figure out where to write the new Expr structure. */
- if( pzBuffer ){
- zAlloc = *pzBuffer;
+ if( pEdupBuf ){
+ sEdupBuf.zAlloc = pEdupBuf->zAlloc;
+#ifdef SQLITE_DEBUG
+ sEdupBuf.zEnd = pEdupBuf->zEnd;
+#endif
staticFlag = EP_Static;
- assert( zAlloc!=0 );
+ assert( sEdupBuf.zAlloc!=0 );
+ assert( dupFlags==EXPRDUP_REDUCE );
}else{
- zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags));
+ int nAlloc;
+ if( dupFlags ){
+ nAlloc = dupedExprSize(p);
+ }else if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+ nToken = sqlite3Strlen30NN(p->u.zToken)+1;
+ nAlloc = ROUND8(EXPR_FULLSIZE + nToken);
+ }else{
+ nToken = 0;
+ nAlloc = ROUND8(EXPR_FULLSIZE);
+ }
+ assert( nAlloc==ROUND8(nAlloc) );
+ sEdupBuf.zAlloc = sqlite3DbMallocRawNN(db, nAlloc);
+#ifdef SQLITE_DEBUG
+ sEdupBuf.zEnd = sEdupBuf.zAlloc ? sEdupBuf.zAlloc+nAlloc : 0;
+#endif
+
staticFlag = 0;
}
- pNew = (Expr *)zAlloc;
+ pNew = (Expr *)sEdupBuf.zAlloc;
+ assert( EIGHT_BYTE_ALIGNMENT(pNew) );
if( pNew ){
/* Set nNewSize to the size allocated for the structure pointed to
@@ -107757,22 +109964,27 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
** by the copy of the p->u.zToken string (if any).
*/
const unsigned nStructSize = dupedExprStructSize(p, dupFlags);
- const int nNewSize = nStructSize & 0xfff;
- int nToken;
- if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
- nToken = sqlite3Strlen30(p->u.zToken) + 1;
- }else{
- nToken = 0;
+ int nNewSize = nStructSize & 0xfff;
+ if( nToken<0 ){
+ if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+ nToken = sqlite3Strlen30(p->u.zToken) + 1;
+ }else{
+ nToken = 0;
+ }
}
if( dupFlags ){
+ assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >= nNewSize+nToken );
assert( ExprHasProperty(p, EP_Reduced)==0 );
- memcpy(zAlloc, p, nNewSize);
+ memcpy(sEdupBuf.zAlloc, p, nNewSize);
}else{
u32 nSize = (u32)exprStructSize(p);
- memcpy(zAlloc, p, nSize);
+ assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >=
+ (int)EXPR_FULLSIZE+nToken );
+ memcpy(sEdupBuf.zAlloc, p, nSize);
if( nSize<EXPR_FULLSIZE ){
- memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize);
+ memset(&sEdupBuf.zAlloc[nSize], 0, EXPR_FULLSIZE-nSize);
}
+ nNewSize = EXPR_FULLSIZE;
}
/* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */
@@ -107785,44 +109997,50 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
}
/* Copy the p->u.zToken string, if any. */
- if( nToken ){
- char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize];
+ assert( nToken>=0 );
+ if( nToken>0 ){
+ char *zToken = pNew->u.zToken = (char*)&sEdupBuf.zAlloc[nNewSize];
memcpy(zToken, p->u.zToken, nToken);
+ nNewSize += nToken;
}
+ sEdupBuf.zAlloc += ROUND8(nNewSize);
+
+ if( ((p->flags|pNew->flags)&(EP_TokenOnly|EP_Leaf))==0 ){
- if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){
/* Fill in the pNew->x.pSelect or pNew->x.pList member. */
if( ExprUseXSelect(p) ){
pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags);
}else{
- pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags);
+ pNew->x.pList = sqlite3ExprListDup(db, p->x.pList,
+ p->op!=TK_ORDER ? dupFlags : 0);
}
- }
- /* Fill in pNew->pLeft and pNew->pRight. */
- if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly|EP_WinFunc) ){
- zAlloc += dupedExprNodeSize(p, dupFlags);
- if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){
- pNew->pLeft = p->pLeft ?
- exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0;
- pNew->pRight = p->pRight ?
- exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0;
- }
#ifndef SQLITE_OMIT_WINDOWFUNC
if( ExprHasProperty(p, EP_WinFunc) ){
pNew->y.pWin = sqlite3WindowDup(db, pNew, p->y.pWin);
assert( ExprHasProperty(pNew, EP_WinFunc) );
}
#endif /* SQLITE_OMIT_WINDOWFUNC */
- if( pzBuffer ){
- *pzBuffer = zAlloc;
- }
- }else{
- if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
- if( pNew->op==TK_SELECT_COLUMN ){
+
+ /* Fill in pNew->pLeft and pNew->pRight. */
+ if( dupFlags ){
+ if( p->op==TK_SELECT_COLUMN ){
pNew->pLeft = p->pLeft;
- assert( p->pRight==0 || p->pRight==p->pLeft
- || ExprHasProperty(p->pLeft, EP_Subquery) );
+ assert( p->pRight==0
+ || p->pRight==p->pLeft
+ || ExprHasProperty(p->pLeft, EP_Subquery) );
+ }else{
+ pNew->pLeft = p->pLeft ?
+ exprDup(db, p->pLeft, EXPRDUP_REDUCE, &sEdupBuf) : 0;
+ }
+ pNew->pRight = p->pRight ?
+ exprDup(db, p->pRight, EXPRDUP_REDUCE, &sEdupBuf) : 0;
+ }else{
+ if( p->op==TK_SELECT_COLUMN ){
+ pNew->pLeft = p->pLeft;
+ assert( p->pRight==0
+ || p->pRight==p->pLeft
+ || ExprHasProperty(p->pLeft, EP_Subquery) );
}else{
pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
}
@@ -107830,6 +110048,8 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
}
}
}
+ if( pEdupBuf ) memcpy(pEdupBuf, &sEdupBuf, sizeof(sEdupBuf));
+ assert( sEdupBuf.zAlloc <= sEdupBuf.zEnd );
return pNew;
}
@@ -108094,11 +110314,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *p, int flags)
** initially NULL, then create a new expression list.
**
** The pList argument must be either NULL or a pointer to an ExprList
-** obtained from a prior call to sqlite3ExprListAppend(). This routine
-** may not be used with an ExprList obtained from sqlite3ExprListDup().
-** Reason: This routine assumes that the number of slots in pList->a[]
-** is a power of two. That is true for sqlite3ExprListAppend() returns
-** but is not necessarily true from the return value of sqlite3ExprListDup().
+** obtained from a prior call to sqlite3ExprListAppend().
**
** If a memory allocation error occurs, the entire list is freed and
** NULL is returned. If non-NULL is returned, then it is guaranteed
@@ -108363,6 +110579,9 @@ static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
if( pList ) exprListDeleteNN(db, pList);
}
+SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3 *db, void *pList){
+ if( ALWAYS(pList) ) exprListDeleteNN(db, (ExprList*)pList);
+}
/*
** Return the bitwise-OR of all Expr.flags fields in the given
@@ -108431,7 +110650,7 @@ SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){
** and 0 if it is FALSE.
*/
SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){
- pExpr = sqlite3ExprSkipCollate((Expr*)pExpr);
+ pExpr = sqlite3ExprSkipCollateAndLikely((Expr*)pExpr);
assert( pExpr->op==TK_TRUEFALSE );
assert( !ExprHasProperty(pExpr, EP_IntValue) );
assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0
@@ -108862,9 +111081,10 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
case TK_COLUMN:
assert( ExprUseYTab(p) );
return ExprHasProperty(p, EP_CanBeNull) ||
- p->y.pTab==0 || /* Reference to column of index on expression */
+ NEVER(p->y.pTab==0) || /* Reference to column of index on expr */
(p->iColumn>=0
&& p->y.pTab->aCol!=0 /* Possible due to prior error */
+ && ALWAYS(p->iColumn<p->y.pTab->nCol)
&& p->y.pTab->aCol[p->iColumn].notNull==0);
default:
return 1;
@@ -108925,6 +111145,27 @@ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){
}
/*
+** Return a pointer to a buffer containing a usable rowid alias for table
+** pTab. An alias is usable if there is not an explicit user-defined column
+** of the same name.
+*/
+SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){
+ const char *azOpt[] = {"_ROWID_", "ROWID", "OID"};
+ int ii;
+ assert( VisibleRowid(pTab) );
+ for(ii=0; ii<ArraySize(azOpt); ii++){
+ int iCol;
+ for(iCol=0; iCol<pTab->nCol; iCol++){
+ if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break;
+ }
+ if( iCol==pTab->nCol ){
+ return azOpt[ii];
+ }
+ }
+ return 0;
+}
+
+/*
** pX is the RHS of an IN operator. If pX is a SELECT statement
** that can be simplified to a direct table access, then return
** a pointer to the SELECT statement. If pX is not a SELECT statement,
@@ -109024,7 +111265,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index.
** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index.
** IN_INDEX_EPH - The cursor was opened on a specially created and
-** populated epheremal table.
+** populated ephemeral table.
** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be
** implemented as a sequence of comparisons.
**
@@ -109037,7 +111278,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** an ephemeral table might need to be generated from the RHS and then
** pX->iTable made to point to the ephemeral table instead of an
** existing table. In this case, the creation and initialization of the
-** ephmeral table might be put inside of a subroutine, the EP_Subrtn flag
+** ephemeral table might be put inside of a subroutine, the EP_Subrtn flag
** will be set on pX and the pX->y.sub fields will be set to show where
** the subroutine is coded.
**
@@ -109049,12 +111290,12 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
**
** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate
** through the set members) then the b-tree must not contain duplicates.
-** An epheremal table will be created unless the selected columns are guaranteed
+** An ephemeral table will be created unless the selected columns are guaranteed
** to be unique - either because it is an INTEGER PRIMARY KEY or due to
** a UNIQUE constraint or index.
**
** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used
-** for fast set membership tests) then an epheremal table must
+** for fast set membership tests) then an ephemeral table must
** be used unless <columns> is a single INTEGER PRIMARY KEY column or an
** index can be found with the specified <columns> as its left-most.
**
@@ -109387,7 +111628,7 @@ SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){
** x IN (SELECT a FROM b) -- IN operator with subquery on the right
**
** The pExpr parameter is the IN operator. The cursor number for the
-** constructed ephermeral table is returned. The first time the ephemeral
+** constructed ephemeral table is returned. The first time the ephemeral
** table is computed, the cursor number is also stored in pExpr->iTable,
** however the cursor number returned might not be the same, as it might
** have been duplicated using OP_OpenDup.
@@ -110202,10 +112443,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(
u8 p5 /* P5 value for OP_Column + FLAGS */
){
assert( pParse->pVdbe!=0 );
+ assert( (p5 & (OPFLAG_NOCHNG|OPFLAG_TYPEOFARG|OPFLAG_LENGTHARG))==p5 );
+ assert( IsVirtual(pTab) || (p5 & OPFLAG_NOCHNG)==0 );
sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pTab, iTable, iColumn, iReg);
if( p5 ){
VdbeOp *pOp = sqlite3VdbeGetLastOp(pParse->pVdbe);
if( pOp->opcode==OP_Column ) pOp->p5 = p5;
+ if( pOp->opcode==OP_VColumn ) pOp->p5 = (p5 & OPFLAG_NOCHNG);
}
return iReg;
}
@@ -110234,7 +112478,7 @@ static void exprToRegister(Expr *pExpr, int iReg){
/*
** Evaluate an expression (either a vector or a scalar expression) and store
-** the result in continguous temporary registers. Return the index of
+** the result in contiguous temporary registers. Return the index of
** the first register used to store the result.
**
** If the returned result register is a temporary scalar, then also write
@@ -110274,7 +112518,7 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
*/
static void setDoNotMergeFlagOnCopy(Vdbe *v){
if( sqlite3VdbeGetLastOp(v)->opcode==OP_Copy ){
- sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergable */
+ sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergeable */
}
}
@@ -110364,13 +112608,13 @@ static int exprCodeInlineFunction(
}
case INLINEFUNC_implies_nonnull_row: {
- /* REsult of sqlite3ExprImpliesNonNullRow() */
+ /* Result of sqlite3ExprImpliesNonNullRow() */
Expr *pA1;
assert( nFarg==2 );
pA1 = pFarg->a[1].pExpr;
if( pA1->op==TK_COLUMN ){
sqlite3VdbeAddOp2(v, OP_Integer,
- sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable),
+ sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable,1),
target);
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, target);
@@ -110459,6 +112703,41 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
/*
+** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This
+** function checks the Parse.pIdxPartExpr list to see if this column
+** can be replaced with a constant value. If so, it generates code to
+** put the constant value in a register (ideally, but not necessarily,
+** register iTarget) and returns the register number.
+**
+** Or, if the TK_COLUMN cannot be replaced by a constant, zero is
+** returned.
+*/
+static int exprPartidxExprLookup(Parse *pParse, Expr *pExpr, int iTarget){
+ IndexedExpr *p;
+ for(p=pParse->pIdxPartExpr; p; p=p->pIENext){
+ if( pExpr->iColumn==p->iIdxCol && pExpr->iTable==p->iDataCur ){
+ Vdbe *v = pParse->pVdbe;
+ int addr = 0;
+ int ret;
+
+ if( p->bMaybeNullRow ){
+ addr = sqlite3VdbeAddOp1(v, OP_IfNullRow, p->iIdxCur);
+ }
+ ret = sqlite3ExprCodeTarget(pParse, p->pExpr, iTarget);
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_Affinity, ret, 1, 0,
+ (const char*)&p->aff, 1);
+ if( addr ){
+ sqlite3VdbeJumpHere(v, addr);
+ sqlite3VdbeChangeP3(v, addr, ret);
+ }
+ return ret;
+ }
+ }
+ return 0;
+}
+
+
+/*
** Generate code into the current Vdbe to evaluate the given
** expression. Attempt to store the results in register "target".
** Return the register where results are stored.
@@ -110494,6 +112773,7 @@ expr_code_doover:
assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
op = pExpr->op;
}
+ assert( op!=TK_ORDER );
switch( op ){
case TK_AGG_COLUMN: {
AggInfo *pAggInfo = pExpr->pAggInfo;
@@ -110507,7 +112787,7 @@ expr_code_doover:
#ifdef SQLITE_VDBE_COVERAGE
/* Verify that the OP_Null above is exercised by tests
** tag-20230325-2 */
- sqlite3VdbeAddOp2(v, OP_NotNull, target, 1);
+ sqlite3VdbeAddOp3(v, OP_NotNull, target, 1, 20230325);
VdbeCoverageNeverTaken(v);
#endif
break;
@@ -110546,7 +112826,7 @@ expr_code_doover:
if( ExprHasProperty(pExpr, EP_FixedCol) ){
/* This COLUMN expression is really a constant due to WHERE clause
** constraints, and that constant is coded by the pExpr->pLeft
- ** expresssion. However, make sure the constant has the correct
+ ** expression. However, make sure the constant has the correct
** datatype by applying the Affinity of the table column to the
** constant.
*/
@@ -110615,6 +112895,11 @@ expr_code_doover:
iTab = pParse->iSelfTab - 1;
}
}
+ else if( pParse->pIdxPartExpr
+ && 0!=(r1 = exprPartidxExprLookup(pParse, pExpr, target))
+ ){
+ return r1;
+ }
assert( ExprUseYTab(pExpr) );
assert( pExpr->y.pTab!=0 );
iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab,
@@ -110872,7 +113157,7 @@ expr_code_doover:
sqlite3ErrorMsg(pParse, "unknown function: %#T()", pExpr);
break;
}
- if( pDef->funcFlags & SQLITE_FUNC_INLINE ){
+ if( (pDef->funcFlags & SQLITE_FUNC_INLINE)!=0 && ALWAYS(pFarg!=0) ){
assert( (pDef->funcFlags & SQLITE_FUNC_UNSAFE)==0 );
assert( (pDef->funcFlags & SQLITE_FUNC_DIRECT)==0 );
return exprCodeInlineFunction(pParse, pFarg,
@@ -110898,10 +113183,10 @@ expr_code_doover:
r1 = sqlite3GetTempRange(pParse, nFarg);
}
- /* For length() and typeof() functions with a column argument,
+ /* For length() and typeof() and octet_length() functions,
** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG
- ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data
- ** loading.
+ ** or OPFLAG_TYPEOFARG or OPFLAG_BYTELENARG respectively, to avoid
+ ** unnecessary data loading.
*/
if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){
u8 exprOp;
@@ -110911,14 +113196,16 @@ expr_code_doover:
if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){
assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG );
assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG );
- testcase( pDef->funcFlags & OPFLAG_LENGTHARG );
- pFarg->a[0].pExpr->op2 =
- pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG);
+ assert( SQLITE_FUNC_BYTELEN==OPFLAG_BYTELENARG );
+ assert( (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG)==OPFLAG_BYTELENARG );
+ testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_LENGTHARG );
+ testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_TYPEOFARG );
+ testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_BYTELENARG);
+ pFarg->a[0].pExpr->op2 = pDef->funcFlags & OPFLAG_BYTELENARG;
}
}
- sqlite3ExprCodeExprList(pParse, pFarg, r1, 0,
- SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR);
+ sqlite3ExprCodeExprList(pParse, pFarg, r1, 0, SQLITE_ECEL_FACTOR);
}else{
r1 = 0;
}
@@ -111273,9 +113560,9 @@ expr_code_doover:
** once. If no functions are involved, then factor the code out and put it at
** the end of the prepared statement in the initialization section.
**
-** If regDest>=0 then the result is always stored in that register and the
+** If regDest>0 then the result is always stored in that register and the
** result is not reusable. If regDest<0 then this routine is free to
-** store the value whereever it wants. The register where the expression
+** store the value wherever it wants. The register where the expression
** is stored is returned. When regDest<0, two identical expressions might
** code to the same register, if they do not contain function calls and hence
** are factored out into the initialization section at the end of the
@@ -111288,6 +113575,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(
){
ExprList *p;
assert( ConstFactorOk(pParse) );
+ assert( regDest!=0 );
p = pParse->pConstExpr;
if( regDest<0 && p ){
struct ExprList_item *pItem;
@@ -111378,8 +113666,10 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
inReg = sqlite3ExprCodeTarget(pParse, pExpr, target);
if( inReg!=target ){
u8 op;
- if( ALWAYS(pExpr)
- && (ExprHasProperty(pExpr,EP_Subquery) || pExpr->op==TK_REGISTER)
+ Expr *pX = sqlite3ExprSkipCollateAndLikely(pExpr);
+ testcase( pX!=pExpr );
+ if( ALWAYS(pX)
+ && (ExprHasProperty(pX,EP_Subquery) || pX->op==TK_REGISTER)
){
op = OP_Copy;
}else{
@@ -112099,8 +114389,8 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB
*/
SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){
return sqlite3ExprCompare(0,
- sqlite3ExprSkipCollateAndLikely(pA),
- sqlite3ExprSkipCollateAndLikely(pB),
+ sqlite3ExprSkipCollate(pA),
+ sqlite3ExprSkipCollate(pB),
iTab);
}
@@ -112193,7 +114483,7 @@ static int exprImpliesNotNull(
** pE1: x!=123 pE2: x IS NOT NULL Result: true
** pE1: x!=?1 pE2: x IS NOT NULL Result: true
** pE1: x IS NULL pE2: x IS NOT NULL Result: false
-** pE1: x IS ?2 pE2: x IS NOT NULL Reuslt: false
+** pE1: x IS ?2 pE2: x IS NOT NULL Result: false
**
** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has
** Expr.iTable<0 then assume a table number given by iTab.
@@ -112230,11 +114520,29 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(
return 0;
}
+/* This is a helper function to impliesNotNullRow(). In this routine,
+** set pWalker->eCode to one only if *both* of the input expressions
+** separately have the implies-not-null-row property.
+*/
+static void bothImplyNotNullRow(Walker *pWalker, Expr *pE1, Expr *pE2){
+ if( pWalker->eCode==0 ){
+ sqlite3WalkExpr(pWalker, pE1);
+ if( pWalker->eCode ){
+ pWalker->eCode = 0;
+ sqlite3WalkExpr(pWalker, pE2);
+ }
+ }
+}
+
/*
** This is the Expr node callback for sqlite3ExprImpliesNonNullRow().
** If the expression node requires that the table at pWalker->iCur
** have one or more non-NULL column, then set pWalker->eCode to 1 and abort.
**
+** pWalker->mWFlags is non-zero if this inquiry is being undertaking on
+** behalf of a RIGHT JOIN (or FULL JOIN). That makes a difference when
+** evaluating terms in the ON clause of an inner join.
+**
** This routine controls an optimization. False positives (setting
** pWalker->eCode to 1 when it should not be) are deadly, but false-negatives
** (never setting pWalker->eCode) is a harmless missed optimization.
@@ -112243,28 +114551,33 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_AGG_COLUMN );
testcase( pExpr->op==TK_AGG_FUNCTION );
if( ExprHasProperty(pExpr, EP_OuterON) ) return WRC_Prune;
+ if( ExprHasProperty(pExpr, EP_InnerON) && pWalker->mWFlags ){
+ /* If iCur is used in an inner-join ON clause to the left of a
+ ** RIGHT JOIN, that does *not* mean that the table must be non-null.
+ ** But it is difficult to check for that condition precisely.
+ ** To keep things simple, any use of iCur from any inner-join is
+ ** ignored while attempting to simplify a RIGHT JOIN. */
+ return WRC_Prune;
+ }
switch( pExpr->op ){
case TK_ISNOT:
case TK_ISNULL:
case TK_NOTNULL:
case TK_IS:
- case TK_OR:
case TK_VECTOR:
- case TK_CASE:
- case TK_IN:
case TK_FUNCTION:
case TK_TRUTH:
+ case TK_CASE:
testcase( pExpr->op==TK_ISNOT );
testcase( pExpr->op==TK_ISNULL );
testcase( pExpr->op==TK_NOTNULL );
testcase( pExpr->op==TK_IS );
- testcase( pExpr->op==TK_OR );
testcase( pExpr->op==TK_VECTOR );
- testcase( pExpr->op==TK_CASE );
- testcase( pExpr->op==TK_IN );
testcase( pExpr->op==TK_FUNCTION );
testcase( pExpr->op==TK_TRUTH );
+ testcase( pExpr->op==TK_CASE );
return WRC_Prune;
+
case TK_COLUMN:
if( pWalker->u.iCur==pExpr->iTable ){
pWalker->eCode = 1;
@@ -112272,21 +114585,38 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
}
return WRC_Prune;
+ case TK_OR:
case TK_AND:
- if( pWalker->eCode==0 ){
+ /* Both sides of an AND or OR must separately imply non-null-row.
+ ** Consider these cases:
+ ** 1. NOT (x AND y)
+ ** 2. x OR y
+ ** If only one of x or y is non-null-row, then the overall expression
+ ** can be true if the other arm is false (case 1) or true (case 2).
+ */
+ testcase( pExpr->op==TK_OR );
+ testcase( pExpr->op==TK_AND );
+ bothImplyNotNullRow(pWalker, pExpr->pLeft, pExpr->pRight);
+ return WRC_Prune;
+
+ case TK_IN:
+ /* Beware of "x NOT IN ()" and "x NOT IN (SELECT 1 WHERE false)",
+ ** both of which can be true. But apart from these cases, if
+ ** the left-hand side of the IN is NULL then the IN itself will be
+ ** NULL. */
+ if( ExprUseXList(pExpr) && ALWAYS(pExpr->x.pList->nExpr>0) ){
sqlite3WalkExpr(pWalker, pExpr->pLeft);
- if( pWalker->eCode ){
- pWalker->eCode = 0;
- sqlite3WalkExpr(pWalker, pExpr->pRight);
- }
}
return WRC_Prune;
case TK_BETWEEN:
- if( sqlite3WalkExpr(pWalker, pExpr->pLeft)==WRC_Abort ){
- assert( pWalker->eCode );
- return WRC_Abort;
- }
+ /* In "x NOT BETWEEN y AND z" either x must be non-null-row or else
+ ** both y and z must be non-null row */
+ assert( ExprUseXList(pExpr) );
+ assert( pExpr->x.pList->nExpr==2 );
+ sqlite3WalkExpr(pWalker, pExpr->pLeft);
+ bothImplyNotNullRow(pWalker, pExpr->x.pList->a[0].pExpr,
+ pExpr->x.pList->a[1].pExpr);
return WRC_Prune;
/* Virtual tables are allowed to use constraints like x=NULL. So
@@ -112348,7 +114678,7 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
** be non-NULL, then the LEFT JOIN can be safely converted into an
** ordinary join.
*/
-SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
+SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab, int isRJ){
Walker w;
p = sqlite3ExprSkipCollateAndLikely(p);
if( p==0 ) return 0;
@@ -112356,7 +114686,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
p = p->pLeft;
}else{
while( p->op==TK_AND ){
- if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab) ) return 1;
+ if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab, isRJ) ) return 1;
p = p->pRight;
}
}
@@ -112364,6 +114694,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){
w.xSelectCallback = 0;
w.xSelectCallback2 = 0;
w.eCode = 0;
+ w.mWFlags = isRJ!=0;
w.u.iCur = iTab;
sqlite3WalkExpr(&w, p);
return w.eCode;
@@ -112424,7 +114755,7 @@ SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(
}
-/* Structure used to pass information throught the Walker in order to
+/* Structure used to pass information throughout the Walker in order to
** implement sqlite3ReferencesSrcList().
*/
struct RefSrcList {
@@ -112531,6 +114862,12 @@ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList
assert( pExpr->op==TK_AGG_FUNCTION );
assert( ExprUseXList(pExpr) );
sqlite3WalkExprList(&w, pExpr->x.pList);
+ if( pExpr->pLeft ){
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ assert( pExpr->pLeft->x.pList!=0 );
+ sqlite3WalkExprList(&w, pExpr->pLeft->x.pList);
+ }
#ifndef SQLITE_OMIT_WINDOWFUNC
if( ExprHasProperty(pExpr, EP_WinFunc) ){
sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter);
@@ -112640,7 +114977,7 @@ static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){
** Return the index in aCol[] of the entry that describes that column.
**
** If no prior entry is found, create a new one and return -1. The
-** new column will have an idex of pAggInfo->nColumn-1.
+** new column will have an index of pAggInfo->nColumn-1.
*/
static void findOrCreateAggInfoColumn(
Parse *pParse, /* Parsing context */
@@ -112653,6 +114990,7 @@ static void findOrCreateAggInfoColumn(
assert( pAggInfo->iFirstReg==0 );
pCol = pAggInfo->aCol;
for(k=0; k<pAggInfo->nColumn; k++, pCol++){
+ if( pCol->pCExpr==pExpr ) return;
if( pCol->iTable==pExpr->iTable
&& pCol->iColumn==pExpr->iColumn
&& pExpr->op!=TK_IF_NULL_ROW
@@ -112777,13 +115115,14 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
case TK_AGG_FUNCTION: {
if( (pNC->ncFlags & NC_InAggFunc)==0
&& pWalker->walkerDepth==pExpr->op2
+ && pExpr->pAggInfo==0
){
/* Check to see if pExpr is a duplicate of another aggregate
** function that is already in the pAggInfo structure
*/
struct AggInfo_func *pItem = pAggInfo->aFunc;
for(i=0; i<pAggInfo->nFunc; i++, pItem++){
- if( pItem->pFExpr==pExpr ) break;
+ if( NEVER(pItem->pFExpr==pExpr) ) break;
if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
break;
}
@@ -112794,14 +115133,44 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
u8 enc = ENC(pParse->db);
i = addAggInfoFunc(pParse->db, pAggInfo);
if( i>=0 ){
+ int nArg;
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
pItem = &pAggInfo->aFunc[i];
pItem->pFExpr = pExpr;
assert( ExprUseUToken(pExpr) );
+ nArg = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
pItem->pFunc = sqlite3FindFunction(pParse->db,
- pExpr->u.zToken,
- pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0);
- if( pExpr->flags & EP_Distinct ){
+ pExpr->u.zToken, nArg, enc, 0);
+ assert( pItem->bOBUnique==0 );
+ if( pExpr->pLeft
+ && (pItem->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)==0
+ ){
+ /* The NEEDCOLL test above causes any ORDER BY clause on
+ ** aggregate min() or max() to be ignored. */
+ ExprList *pOBList;
+ assert( nArg>0 );
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ pItem->iOBTab = pParse->nTab++;
+ pOBList = pExpr->pLeft->x.pList;
+ assert( pOBList->nExpr>0 );
+ assert( pItem->bOBUnique==0 );
+ if( pOBList->nExpr==1
+ && nArg==1
+ && sqlite3ExprCompare(0,pOBList->a[0].pExpr,
+ pExpr->x.pList->a[0].pExpr,0)==0
+ ){
+ pItem->bOBPayload = 0;
+ pItem->bOBUnique = ExprHasProperty(pExpr, EP_Distinct);
+ }else{
+ pItem->bOBPayload = 1;
+ }
+ pItem->bUseSubtype =
+ (pItem->pFunc->funcFlags & SQLITE_SUBTYPE)!=0;
+ }else{
+ pItem->iOBTab = -1;
+ }
+ if( ExprHasProperty(pExpr, EP_Distinct) && !pItem->bOBUnique ){
pItem->iDistinct = pParse->nTab++;
}else{
pItem->iDistinct = -1;
@@ -113437,14 +115806,19 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
/* Verify that constraints are still satisfied */
if( pNew->pCheck!=0
|| (pCol->notNull && (pCol->colFlags & COLFLAG_GENERATED)!=0)
+ || (pTab->tabFlags & TF_Strict)!=0
){
sqlite3NestedParse(pParse,
"SELECT CASE WHEN quick_check GLOB 'CHECK*'"
" THEN raise(ABORT,'CHECK constraint failed')"
+ " WHEN quick_check GLOB 'non-* value in*'"
+ " THEN raise(ABORT,'type mismatch on DEFAULT')"
" ELSE raise(ABORT,'NOT NULL constraint failed')"
" END"
" FROM pragma_quick_check(%Q,%Q)"
- " WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'",
+ " WHERE quick_check GLOB 'CHECK*'"
+ " OR quick_check GLOB 'NULL*'"
+ " OR quick_check GLOB 'non-* value in*'",
zTab, zDb
);
}
@@ -113533,7 +115907,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
pNew->u.tab.pDfltList = sqlite3ExprListDup(db, pTab->u.tab.pDfltList, 0);
pNew->pSchema = db->aDb[iDb].pSchema;
pNew->u.tab.addColOffset = pTab->u.tab.addColOffset;
- pNew->nTabRef = 1;
+ assert( pNew->nTabRef==1 );
exit_begin_add_column:
sqlite3SrcListDelete(db, pSrc);
@@ -114038,7 +116412,7 @@ static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){
}
/*
-** An error occured while parsing or otherwise processing a database
+** An error occurred while parsing or otherwise processing a database
** object (either pParse->pNewTable, pNewIndex or pNewTrigger) as part of an
** ALTER TABLE RENAME COLUMN program. The error message emitted by the
** sub-routine is currently stored in pParse->zErrMsg. This function
@@ -115559,9 +117933,9 @@ static void openStatTable(
typedef struct StatAccum StatAccum;
typedef struct StatSample StatSample;
struct StatSample {
- tRowcnt *anEq; /* sqlite_stat4.nEq */
tRowcnt *anDLt; /* sqlite_stat4.nDLt */
#ifdef SQLITE_ENABLE_STAT4
+ tRowcnt *anEq; /* sqlite_stat4.nEq */
tRowcnt *anLt; /* sqlite_stat4.nLt */
union {
i64 iRowid; /* Rowid in main table of the key */
@@ -115719,9 +118093,9 @@ static void statInit(
/* Allocate the space required for the StatAccum object */
n = sizeof(*p)
- + sizeof(tRowcnt)*nColUp /* StatAccum.anEq */
- + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
+ + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */
#ifdef SQLITE_ENABLE_STAT4
+ n += sizeof(tRowcnt)*nColUp; /* StatAccum.anEq */
if( mxSample ){
n += sizeof(tRowcnt)*nColUp /* StatAccum.anLt */
+ sizeof(StatSample)*(nCol+mxSample) /* StatAccum.aBest[], a[] */
@@ -115742,9 +118116,9 @@ static void statInit(
p->nKeyCol = nKeyCol;
p->nSkipAhead = 0;
p->current.anDLt = (tRowcnt*)&p[1];
- p->current.anEq = &p->current.anDLt[nColUp];
#ifdef SQLITE_ENABLE_STAT4
+ p->current.anEq = &p->current.anDLt[nColUp];
p->mxSample = p->nLimit==0 ? mxSample : 0;
if( mxSample ){
u8 *pSpace; /* Allocated space not yet assigned */
@@ -116011,7 +118385,9 @@ static void statPush(
if( p->nRow==0 ){
/* This is the first call to this function. Do initialization. */
+#ifdef SQLITE_ENABLE_STAT4
for(i=0; i<p->nCol; i++) p->current.anEq[i] = 1;
+#endif
}else{
/* Second and subsequent calls get processed here */
#ifdef SQLITE_ENABLE_STAT4
@@ -116020,15 +118396,17 @@ static void statPush(
/* Update anDLt[], anLt[] and anEq[] to reflect the values that apply
** to the current row of the index. */
+#ifdef SQLITE_ENABLE_STAT4
for(i=0; i<iChng; i++){
p->current.anEq[i]++;
}
+#endif
for(i=iChng; i<p->nCol; i++){
p->current.anDLt[i]++;
#ifdef SQLITE_ENABLE_STAT4
if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i];
-#endif
p->current.anEq[i] = 1;
+#endif
}
}
@@ -116162,7 +118540,9 @@ static void statGet(
u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1;
sqlite3_str_appendf(&sStat, " %llu", iVal);
+#ifdef SQLITE_ENABLE_STAT4
assert( p->current.anEq[i] );
+#endif
}
sqlite3ResultStrAccum(context, &sStat);
}
@@ -116851,6 +119231,16 @@ static void decodeIntArray(
while( z[0]!=0 && z[0]!=' ' ) z++;
while( z[0]==' ' ) z++;
}
+
+ /* Set the bLowQual flag if the peak number of rows obtained
+ ** from a full equality match is so large that a full table scan
+ ** seems likely to be faster than using the index.
+ */
+ if( aLog[0] > 66 /* Index has more than 100 rows */
+ && aLog[0] <= aLog[nOut-1] /* And only a single value seen */
+ ){
+ pIndex->bLowQual = 1;
+ }
}
}
@@ -117144,14 +119534,15 @@ static int loadStatTbl(
decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0);
decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0);
- /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer.
+ /* Take a copy of the sample. Add 8 extra 0x00 bytes the end of the buffer.
** This is in case the sample record is corrupted. In that case, the
** sqlite3VdbeRecordCompare() may read up to two varints past the
** end of the allocated buffer before it realizes it is dealing with
- ** a corrupt record. Adding the two 0x00 bytes prevents this from causing
+ ** a corrupt record. Or it might try to read a large integer from the
+ ** buffer. In any case, eight 0x00 bytes prevents this from causing
** a buffer overread. */
pSample->n = sqlite3_column_bytes(pStmt, 4);
- pSample->p = sqlite3DbMallocZero(db, pSample->n + 2);
+ pSample->p = sqlite3DbMallocZero(db, pSample->n + 8);
if( pSample->p==0 ){
sqlite3_finalize(pStmt);
return SQLITE_NOMEM_BKPT;
@@ -118109,7 +120500,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck(
sqlite3 *db = pParse->db;
int rc;
- /* Don't do any authorization checks if the database is initialising
+ /* Don't do any authorization checks if the database is initializing
** or if the parser is being invoked from within sqlite3_declare_vtab.
*/
assert( !IN_RENAME_OBJECT || db->xAuth==0 );
@@ -118410,29 +120801,26 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
pParse->nVtabLock = 0;
#endif
+#ifndef SQLITE_OMIT_SHARED_CACHE
/* Once all the cookies have been verified and transactions opened,
** obtain the required table-locks. This is a no-op unless the
** shared-cache feature is enabled.
*/
- codeTableLocks(pParse);
+ if( pParse->nTableLock ) codeTableLocks(pParse);
+#endif
/* Initialize any AUTOINCREMENT data structures required.
*/
- sqlite3AutoincrementBegin(pParse);
+ if( pParse->pAinc ) sqlite3AutoincrementBegin(pParse);
- /* Code constant expressions that where factored out of inner loops.
- **
- ** The pConstExpr list might also contain expressions that we simply
- ** want to keep around until the Parse object is deleted. Such
- ** expressions have iConstExprReg==0. Do not generate code for
- ** those expressions, of course.
+ /* Code constant expressions that were factored out of inner loops.
*/
if( pParse->pConstExpr ){
ExprList *pEL = pParse->pConstExpr;
pParse->okConstFactor = 0;
for(i=0; i<pEL->nExpr; i++){
- int iReg = pEL->a[i].u.iConstExprReg;
- sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg);
+ assert( pEL->a[i].u.iConstExprReg>0 );
+ sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg);
}
}
@@ -118899,7 +121287,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetExpr(
*/
SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){
if( pCol->iDflt==0 ) return 0;
- if( NEVER(!IsOrdinaryTable(pTab)) ) return 0;
+ if( !IsOrdinaryTable(pTab) ) return 0;
if( NEVER(pTab->u.tab.pDfltList==0) ) return 0;
if( NEVER(pTab->u.tab.pDfltList->nExpr<pCol->iDflt) ) return 0;
return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr;
@@ -118931,7 +121319,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetColl(
}
/*
-** Return the collating squence name for a column
+** Return the collating sequence name for a column
*/
SQLITE_PRIVATE const char *sqlite3ColumnColl(Column *pCol){
const char *z;
@@ -119052,6 +121440,9 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return;
deleteTable(db, pTable);
}
+SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3 *db, void *pTable){
+ sqlite3DeleteTable(db, (Table*)pTable);
+}
/*
@@ -119587,19 +121978,13 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){
#endif
/*
-** Name of the special TEMP trigger used to implement RETURNING. The
-** name begins with "sqlite_" so that it is guaranteed not to collide
-** with any application-generated triggers.
-*/
-#define RETURNING_TRIGGER_NAME "sqlite_returning"
-
-/*
** Clean up the data structures associated with the RETURNING clause.
*/
-static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){
+static void sqlite3DeleteReturning(sqlite3 *db, void *pArg){
+ Returning *pRet = (Returning*)pArg;
Hash *pHash;
pHash = &(db->aDb[1].pSchema->trigHash);
- sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, 0);
+ sqlite3HashInsert(pHash, pRet->zName, 0);
sqlite3ExprListDelete(db, pRet->pReturnEL);
sqlite3DbFree(db, pRet);
}
@@ -119638,11 +122023,12 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
pParse->u1.pReturning = pRet;
pRet->pParse = pParse;
pRet->pReturnEL = pList;
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet);
+ sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet);
testcase( pParse->earlyCleanup );
if( db->mallocFailed ) return;
- pRet->retTrig.zName = RETURNING_TRIGGER_NAME;
+ sqlite3_snprintf(sizeof(pRet->zName), pRet->zName,
+ "sqlite_returning_%p", pParse);
+ pRet->retTrig.zName = pRet->zName;
pRet->retTrig.op = TK_RETURNING;
pRet->retTrig.tr_tm = TRIGGER_AFTER;
pRet->retTrig.bReturning = 1;
@@ -119653,9 +122039,9 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
pRet->retTStep.pTrig = &pRet->retTrig;
pRet->retTStep.pExprList = pList;
pHash = &(db->aDb[1].pSchema->trigHash);
- assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0
+ assert( sqlite3HashFind(pHash, pRet->zName)==0
|| pParse->nErr || pParse->ifNotExists );
- if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig)
+ if( sqlite3HashInsert(pHash, pRet->zName, &pRet->retTrig)
==&pRet->retTrig ){
sqlite3OomFault(db);
}
@@ -119689,7 +122075,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
}
if( !IN_RENAME_OBJECT ) sqlite3DequoteToken(&sName);
- /* Because keywords GENERATE ALWAYS can be converted into indentifiers
+ /* Because keywords GENERATE ALWAYS can be converted into identifiers
** by the parser, we can sometimes end up with a typename that ends
** with "generated always". Check for this case and omit the surplus
** text. */
@@ -119836,7 +122222,8 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, Column *pCol){
assert( zIn!=0 );
while( zIn[0] ){
- h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
+ u8 x = *(u8*)zIn;
+ h = (h<<8) + sqlite3UpperToLower[x];
zIn++;
if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */
aff = SQLITE_AFF_TEXT;
@@ -119910,7 +122297,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(
Parse *pParse, /* Parsing context */
Expr *pExpr, /* The parsed expression of the default value */
const char *zStart, /* Start of the default value text */
- const char *zEnd /* First character past end of defaut value text */
+ const char *zEnd /* First character past end of default value text */
){
Table *p;
Column *pCol;
@@ -120258,7 +122645,7 @@ static int identLength(const char *z){
** to the specified offset in the buffer and updates *pIdx to refer
** to the first byte after the last byte written before returning.
**
-** If the string zSignedIdent consists entirely of alpha-numeric
+** If the string zSignedIdent consists entirely of alphanumeric
** characters, does not begin with a digit and is not an SQL keyword,
** then it is copied to the output buffer exactly as it is. Otherwise,
** it is quoted using double-quotes.
@@ -120410,7 +122797,7 @@ static void estimateIndexWidth(Index *pIdx){
for(i=0; i<pIdx->nColumn; i++){
i16 x = pIdx->aiColumn[i];
assert( x<pIdx->pTable->nCol );
- wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst;
+ wIndex += x<0 ? 1 : aCol[x].szEst;
}
pIdx->szIdxRow = sqlite3LogEst(wIndex*4);
}
@@ -121099,6 +123486,17 @@ SQLITE_PRIVATE void sqlite3EndTable(
/* Reparse everything to update our internal data structures */
sqlite3VdbeAddParseSchemaOp(v, iDb,
sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0);
+
+ /* Test for cycles in generated columns and illegal expressions
+ ** in CHECK constraints and in DEFAULT clauses. */
+ if( p->tabFlags & TF_HasGenerated ){
+ sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0,
+ sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"",
+ db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC);
+ }
+ sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0,
+ sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)",
+ db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC);
}
/* Add the table to the in-memory representation of the database.
@@ -122148,7 +124546,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
#ifndef SQLITE_OMIT_TEMPDB
/* If the index name was unqualified, check if the table
** is a temp table. If so, set the database to 1. Do not do this
- ** if initialising a database schema.
+ ** if initializing a database schema.
*/
if( !db->init.busy ){
pTab = sqlite3SrcListLookup(pParse, pTblName);
@@ -123690,7 +126088,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
if( iDb<0 ) return;
z = sqlite3NameFromToken(db, pObjName);
if( z==0 ) return;
- zDb = db->aDb[iDb].zDbSName;
+ zDb = pName2->n ? db->aDb[iDb].zDbSName : 0;
pTab = sqlite3FindTable(db, z, zDb);
if( pTab ){
reindexTable(pParse, pTab, 0);
@@ -123700,6 +126098,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
pIndex = sqlite3FindIndex(db, z, zDb);
sqlite3DbFree(db, z);
if( pIndex ){
+ iDb = sqlite3SchemaToIndex(db, pIndex->pTable->pSchema);
sqlite3BeginWriteOperation(pParse, 0, iDb);
sqlite3RefillIndex(pParse, pIndex, -1);
return;
@@ -123805,7 +126204,7 @@ SQLITE_PRIVATE void sqlite3CteDelete(sqlite3 *db, Cte *pCte){
/*
** This routine is invoked once per CTE by the parser while parsing a
-** WITH clause. The CTE described by teh third argument is added to
+** WITH clause. The CTE described by the third argument is added to
** the WITH clause of the second argument. If the second argument is
** NULL, then a new WITH argument is created.
*/
@@ -123865,6 +126264,9 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){
sqlite3DbFree(db, pWith);
}
}
+SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3 *db, void *pWith){
+ sqlite3WithDelete(db, (With*)pWith);
+}
#endif /* !defined(SQLITE_OMIT_CTE) */
/************** End of build.c ***********************************************/
@@ -124447,8 +126849,9 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){
Table *pTab;
assert( pItem && pSrc->nSrc>=1 );
pTab = sqlite3LocateTableItem(pParse, 0, pItem);
- sqlite3DeleteTable(pParse->db, pItem->pTab);
+ if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab);
pItem->pTab = pTab;
+ pItem->fg.notCte = 1;
if( pTab ){
pTab->nTabRef++;
if( pItem->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pItem) ){
@@ -124601,7 +127004,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
sqlite3 *db = pParse->db;
Expr *pLhs = NULL; /* LHS of IN(SELECT...) operator */
Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */
- ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */
+ ExprList *pEList = NULL; /* Expression list containing only pSelectRowid*/
SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */
Select *pSelect = NULL; /* Complete SELECT tree */
Table *pTab;
@@ -124639,14 +127042,20 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
);
}else{
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ assert( pPk!=0 );
+ assert( pPk->nKeyCol>=1 );
if( pPk->nKeyCol==1 ){
- const char *zName = pTab->aCol[pPk->aiColumn[0]].zCnName;
+ const char *zName;
+ assert( pPk->aiColumn[0]>=0 && pPk->aiColumn[0]<pTab->nCol );
+ zName = pTab->aCol[pPk->aiColumn[0]].zCnName;
pLhs = sqlite3Expr(db, TK_ID, zName);
pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, zName));
}else{
int i;
for(i=0; i<pPk->nKeyCol; i++){
- Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName);
+ Expr *p;
+ assert( pPk->aiColumn[i]>=0 && pPk->aiColumn[i]<pTab->nCol );
+ p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName);
pEList = sqlite3ExprListAppend(pParse, pEList, p);
}
pLhs = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
@@ -124675,7 +127084,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
pOrderBy,0,pLimit
);
- /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */
+ /* now generate the new WHERE rowid IN clause for the DELETE/UPDATE */
pInClause = sqlite3PExpr(pParse, TK_IN, pLhs, 0);
sqlite3PExprAddSelect(pParse, pInClause, pSelect);
return pInClause;
@@ -124904,7 +127313,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( HasRowid(pTab) ){
/* For a rowid table, initialize the RowSet to an empty set */
pPk = 0;
- nPk = 1;
+ assert( nPk==1 );
iRowSet = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet);
}else{
@@ -124932,7 +127341,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( pWInfo==0 ) goto delete_from_cleanup;
eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI );
- assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF );
+ assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF
+ || OptimizationDisabled(db, SQLITE_OnePass) );
if( eOnePass!=ONEPASS_SINGLE ) sqlite3MultiWrite(pParse);
if( sqlite3WhereUsesDeferredSeek(pWInfo) ){
sqlite3VdbeAddOp1(v, OP_FinishSeek, iTabCur);
@@ -125269,9 +127679,11 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0);
/* Invoke AFTER DELETE trigger programs. */
- sqlite3CodeRowTrigger(pParse, pTrigger,
- TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel
- );
+ if( pTrigger ){
+ sqlite3CodeRowTrigger(pParse, pTrigger,
+ TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel
+ );
+ }
/* Jump here if the row had already been deleted before any BEFORE
** trigger programs were invoked. Or if a trigger program throws a
@@ -125585,6 +127997,42 @@ static void lengthFunc(
}
/*
+** Implementation of the octet_length() function
+*/
+static void bytelengthFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ assert( argc==1 );
+ UNUSED_PARAMETER(argc);
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_BLOB: {
+ sqlite3_result_int(context, sqlite3_value_bytes(argv[0]));
+ break;
+ }
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT: {
+ i64 m = sqlite3_context_db_handle(context)->enc<=SQLITE_UTF8 ? 1 : 2;
+ sqlite3_result_int64(context, sqlite3_value_bytes(argv[0])*m);
+ break;
+ }
+ case SQLITE_TEXT: {
+ if( sqlite3_value_encoding(argv[0])<=SQLITE_UTF8 ){
+ sqlite3_result_int(context, sqlite3_value_bytes(argv[0]));
+ }else{
+ sqlite3_result_int(context, sqlite3_value_bytes16(argv[0]));
+ }
+ break;
+ }
+ default: {
+ sqlite3_result_null(context);
+ break;
+ }
+ }
+}
+
+/*
** Implementation of the abs() function.
**
** IMP: R-23979-26855 The abs(X) function returns the absolute value of
@@ -125860,7 +128308,7 @@ static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
}else if( n==0 ){
r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5)));
}else{
- zBuf = sqlite3_mprintf("%.*f",n,r);
+ zBuf = sqlite3_mprintf("%!.*f",n,r);
if( zBuf==0 ){
sqlite3_result_error_nomem(context);
return;
@@ -126060,7 +128508,7 @@ struct compareInfo {
/*
** For LIKE and GLOB matching on EBCDIC machines, assume that every
-** character is exactly one byte in size. Also, provde the Utf8Read()
+** character is exactly one byte in size. Also, provide the Utf8Read()
** macro for fast reading of the next character in the common case where
** the next character is ASCII.
*/
@@ -126293,7 +128741,7 @@ SQLITE_API int sqlite3_like_count = 0;
/*
** Implementation of the like() SQL function. This function implements
-** the build-in LIKE operator. The first argument to the function is the
+** the built-in LIKE operator. The first argument to the function is the
** pattern and the second argument is the string. So, the SQL statements:
**
** A LIKE B
@@ -126626,6 +129074,7 @@ static void charFunc(
*zOut++ = 0x80 + (u8)(c & 0x3F);
} \
}
+ *zOut = 0;
sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8);
}
@@ -126654,7 +129103,8 @@ static void hexFunc(
*(z++) = hexdigits[c&0xf];
}
*z = 0;
- sqlite3_result_text(context, zHex, n*2, sqlite3_free);
+ sqlite3_result_text64(context, zHex, (u64)(z-zHex),
+ sqlite3_free, SQLITE_UTF8);
}
}
@@ -126679,7 +129129,7 @@ static int strContainsChar(const u8 *zStr, int nStr, u32 ch){
** decoded and returned as a blob.
**
** If there is only a single argument, then it must consist only of an
-** even number of hexadeximal digits. Otherwise, return NULL.
+** even number of hexadecimal digits. Otherwise, return NULL.
**
** Or, if there is a second argument, then any character that appears in
** the second argument is also allowed to appear between pairs of hexadecimal
@@ -126948,6 +129398,81 @@ static void trimFunc(
sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT);
}
+/* The core implementation of the CONCAT(...) and CONCAT_WS(SEP,...)
+** functions.
+**
+** Return a string value that is the concatenation of all non-null
+** entries in argv[]. Use zSep as the separator.
+*/
+static void concatFuncCore(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv,
+ int nSep,
+ const char *zSep
+){
+ i64 j, k, n = 0;
+ int i;
+ char *z;
+ for(i=0; i<argc; i++){
+ n += sqlite3_value_bytes(argv[i]);
+ }
+ n += (argc-1)*nSep;
+ z = sqlite3_malloc64(n+1);
+ if( z==0 ){
+ sqlite3_result_error_nomem(context);
+ return;
+ }
+ j = 0;
+ for(i=0; i<argc; i++){
+ k = sqlite3_value_bytes(argv[i]);
+ if( k>0 ){
+ const char *v = (const char*)sqlite3_value_text(argv[i]);
+ if( v!=0 ){
+ if( j>0 && nSep>0 ){
+ memcpy(&z[j], zSep, nSep);
+ j += nSep;
+ }
+ memcpy(&z[j], v, k);
+ j += k;
+ }
+ }
+ }
+ z[j] = 0;
+ assert( j<=n );
+ sqlite3_result_text64(context, z, j, sqlite3_free, SQLITE_UTF8);
+}
+
+/*
+** The CONCAT(...) function. Generate a string result that is the
+** concatentation of all non-null arguments.
+*/
+static void concatFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ concatFuncCore(context, argc, argv, 0, "");
+}
+
+/*
+** The CONCAT_WS(separator, ...) function.
+**
+** Generate a string that is the concatenation of 2nd through the Nth
+** argument. Use the first argument (which must be non-NULL) as the
+** separator.
+*/
+static void concatwsFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ int nSep = sqlite3_value_bytes(argv[0]);
+ const char *zSep = (const char*)sqlite3_value_text(argv[0]);
+ if( zSep==0 ) return;
+ concatFuncCore(context, argc-1, argv+1, nSep, zSep);
+}
+
#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
/*
@@ -127069,14 +129594,69 @@ static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){
*/
typedef struct SumCtx SumCtx;
struct SumCtx {
- double rSum; /* Floating point sum */
- i64 iSum; /* Integer sum */
+ double rSum; /* Running sum as as a double */
+ double rErr; /* Error term for Kahan-Babushka-Neumaier summation */
+ i64 iSum; /* Running sum as a signed integer */
i64 cnt; /* Number of elements summed */
- u8 overflow; /* True if integer overflow seen */
- u8 approx; /* True if non-integer value was input to the sum */
+ u8 approx; /* True if any non-integer value was input to the sum */
+ u8 ovrfl; /* Integer overflow seen */
};
/*
+** Do one step of the Kahan-Babushka-Neumaier summation.
+**
+** https://en.wikipedia.org/wiki/Kahan_summation_algorithm
+**
+** Variables are marked "volatile" to defeat c89 x86 floating point
+** optimizations can mess up this algorithm.
+*/
+static void kahanBabuskaNeumaierStep(
+ volatile SumCtx *pSum,
+ volatile double r
+){
+ volatile double s = pSum->rSum;
+ volatile double t = s + r;
+ if( fabs(s) > fabs(r) ){
+ pSum->rErr += (s - t) + r;
+ }else{
+ pSum->rErr += (r - t) + s;
+ }
+ pSum->rSum = t;
+}
+
+/*
+** Add a (possibly large) integer to the running sum.
+*/
+static void kahanBabuskaNeumaierStepInt64(volatile SumCtx *pSum, i64 iVal){
+ if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){
+ i64 iBig, iSm;
+ iSm = iVal % 16384;
+ iBig = iVal - iSm;
+ kahanBabuskaNeumaierStep(pSum, iBig);
+ kahanBabuskaNeumaierStep(pSum, iSm);
+ }else{
+ kahanBabuskaNeumaierStep(pSum, (double)iVal);
+ }
+}
+
+/*
+** Initialize the Kahan-Babaska-Neumaier sum from a 64-bit integer
+*/
+static void kahanBabuskaNeumaierInit(
+ volatile SumCtx *p,
+ i64 iVal
+){
+ if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){
+ i64 iSm = iVal % 16384;
+ p->rSum = (double)(iVal - iSm);
+ p->rErr = (double)iSm;
+ }else{
+ p->rSum = (double)iVal;
+ p->rErr = 0.0;
+ }
+}
+
+/*
** Routines used to compute the sum, average, and total.
**
** The SUM() function follows the (broken) SQL standard which means
@@ -127095,15 +129675,29 @@ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){
type = sqlite3_value_numeric_type(argv[0]);
if( p && type!=SQLITE_NULL ){
p->cnt++;
- if( type==SQLITE_INTEGER ){
- i64 v = sqlite3_value_int64(argv[0]);
- p->rSum += v;
- if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){
- p->approx = p->overflow = 1;
+ if( p->approx==0 ){
+ if( type!=SQLITE_INTEGER ){
+ kahanBabuskaNeumaierInit(p, p->iSum);
+ p->approx = 1;
+ kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0]));
+ }else{
+ i64 x = p->iSum;
+ if( sqlite3AddInt64(&x, sqlite3_value_int64(argv[0]))==0 ){
+ p->iSum = x;
+ }else{
+ p->ovrfl = 1;
+ kahanBabuskaNeumaierInit(p, p->iSum);
+ p->approx = 1;
+ kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0]));
+ }
}
}else{
- p->rSum += sqlite3_value_double(argv[0]);
- p->approx = 1;
+ if( type==SQLITE_INTEGER ){
+ kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0]));
+ }else{
+ p->ovrfl = 0;
+ kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0]));
+ }
}
}
}
@@ -127120,13 +129714,18 @@ static void sumInverse(sqlite3_context *context, int argc, sqlite3_value**argv){
if( ALWAYS(p) && type!=SQLITE_NULL ){
assert( p->cnt>0 );
p->cnt--;
- assert( type==SQLITE_INTEGER || p->approx );
- if( type==SQLITE_INTEGER && p->approx==0 ){
- i64 v = sqlite3_value_int64(argv[0]);
- p->rSum -= v;
- p->iSum -= v;
+ if( !p->approx ){
+ p->iSum -= sqlite3_value_int64(argv[0]);
+ }else if( type==SQLITE_INTEGER ){
+ i64 iVal = sqlite3_value_int64(argv[0]);
+ if( iVal!=SMALLEST_INT64 ){
+ kahanBabuskaNeumaierStepInt64(p, -iVal);
+ }else{
+ kahanBabuskaNeumaierStepInt64(p, LARGEST_INT64);
+ kahanBabuskaNeumaierStepInt64(p, 1);
+ }
}else{
- p->rSum -= sqlite3_value_double(argv[0]);
+ kahanBabuskaNeumaierStep(p, -sqlite3_value_double(argv[0]));
}
}
}
@@ -127137,10 +129736,14 @@ static void sumFinalize(sqlite3_context *context){
SumCtx *p;
p = sqlite3_aggregate_context(context, 0);
if( p && p->cnt>0 ){
- if( p->overflow ){
- sqlite3_result_error(context,"integer overflow",-1);
- }else if( p->approx ){
- sqlite3_result_double(context, p->rSum);
+ if( p->approx ){
+ if( p->ovrfl ){
+ sqlite3_result_error(context,"integer overflow",-1);
+ }else if( !sqlite3IsNaN(p->rErr) ){
+ sqlite3_result_double(context, p->rSum+p->rErr);
+ }else{
+ sqlite3_result_double(context, p->rSum);
+ }
}else{
sqlite3_result_int64(context, p->iSum);
}
@@ -127150,14 +129753,29 @@ static void avgFinalize(sqlite3_context *context){
SumCtx *p;
p = sqlite3_aggregate_context(context, 0);
if( p && p->cnt>0 ){
- sqlite3_result_double(context, p->rSum/(double)p->cnt);
+ double r;
+ if( p->approx ){
+ r = p->rSum;
+ if( !sqlite3IsNaN(p->rErr) ) r += p->rErr;
+ }else{
+ r = (double)(p->iSum);
+ }
+ sqlite3_result_double(context, r/(double)p->cnt);
}
}
static void totalFinalize(sqlite3_context *context){
SumCtx *p;
+ double r = 0.0;
p = sqlite3_aggregate_context(context, 0);
- /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
- sqlite3_result_double(context, p ? p->rSum : (double)0);
+ if( p ){
+ if( p->approx ){
+ r = p->rSum;
+ if( !sqlite3IsNaN(p->rErr) ) r += p->rErr;
+ }else{
+ r = (double)(p->iSum);
+ }
+ }
+ sqlite3_result_double(context, r);
}
/*
@@ -127276,6 +129894,7 @@ static void minMaxFinalize(sqlite3_context *context){
/*
** group_concat(EXPR, ?SEPARATOR?)
+** string_agg(EXPR, SEPARATOR)
**
** The SEPARATOR goes before the EXPR string. This is tragic. The
** groupConcatInverse() implementation would have been easier if the
@@ -127379,7 +129998,7 @@ static void groupConcatInverse(
if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
pGCC = (GroupConcatCtx*)sqlite3_aggregate_context(context, sizeof(*pGCC));
/* pGCC is always non-NULL since groupConcatStep() will have always
- ** run frist to initialize it */
+ ** run first to initialize it */
if( ALWAYS(pGCC) ){
int nVS;
/* Must call sqlite3_value_text() to convert the argument into text prior
@@ -127463,8 +130082,10 @@ SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3 *db){
** sensitive.
*/
SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){
+ FuncDef *pDef;
struct compareInfo *pInfo;
int flags;
+ int nArg;
if( caseSensitive ){
pInfo = (struct compareInfo*)&likeInfoAlt;
flags = SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE;
@@ -127472,10 +130093,13 @@ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive)
pInfo = (struct compareInfo*)&likeInfoNorm;
flags = SQLITE_FUNC_LIKE;
}
- sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
- sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
- sqlite3FindFunction(db, "like", 2, SQLITE_UTF8, 0)->funcFlags |= flags;
- sqlite3FindFunction(db, "like", 3, SQLITE_UTF8, 0)->funcFlags |= flags;
+ for(nArg=2; nArg<=3; nArg++){
+ sqlite3CreateFunc(db, "like", nArg, SQLITE_UTF8, pInfo, likeFunc,
+ 0, 0, 0, 0, 0);
+ pDef = sqlite3FindFunction(db, "like", nArg, SQLITE_UTF8, 0);
+ pDef->funcFlags |= flags;
+ pDef->funcFlags &= ~SQLITE_FUNC_UNSAFE;
+ }
}
/*
@@ -127747,6 +130371,37 @@ static void signFunc(
sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0);
}
+#ifdef SQLITE_DEBUG
+/*
+** Implementation of fpdecode(x,y,z) function.
+**
+** x is a real number that is to be decoded. y is the precision.
+** z is the maximum real precision.
+*/
+static void fpdecodeFunc(
+ sqlite3_context *context,
+ int argc,
+ sqlite3_value **argv
+){
+ FpDecode s;
+ double x;
+ int y, z;
+ char zBuf[100];
+ UNUSED_PARAMETER(argc);
+ assert( argc==3 );
+ x = sqlite3_value_double(argv[0]);
+ y = sqlite3_value_int(argv[1]);
+ z = sqlite3_value_int(argv[2]);
+ sqlite3FpDecode(&s, x, y, z);
+ if( s.isSpecial==2 ){
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN");
+ }else{
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "%c%.*s/%d", s.sign, s.n, s.z, s.iDP);
+ }
+ sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+}
+#endif /* SQLITE_DEBUG */
+
/*
** All of the FuncDef structures in the aBuiltinFunc[] array above
** to the global function hash table. This occurs at start-time (as
@@ -127811,12 +130466,16 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF),
FUNCTION2(subtype, 1, 0, 0, subtypeFunc, SQLITE_FUNC_TYPEOF),
FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH),
+ FUNCTION2(octet_length, 1, 0, 0, bytelengthFunc,SQLITE_FUNC_BYTELEN),
FUNCTION(instr, 2, 0, 0, instrFunc ),
FUNCTION(printf, -1, 0, 0, printfFunc ),
FUNCTION(format, -1, 0, 0, printfFunc ),
FUNCTION(unicode, 1, 0, 0, unicodeFunc ),
FUNCTION(char, -1, 0, 0, charFunc ),
FUNCTION(abs, 1, 0, 0, absFunc ),
+#ifdef SQLITE_DEBUG
+ FUNCTION(fpdecode, 3, 0, 0, fpdecodeFunc ),
+#endif
#ifndef SQLITE_OMIT_FLOATING_POINT
FUNCTION(round, 1, 0, 0, roundFunc ),
FUNCTION(round, 2, 0, 0, roundFunc ),
@@ -127826,6 +130485,11 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION(hex, 1, 0, 0, hexFunc ),
FUNCTION(unhex, 1, 0, 0, unhexFunc ),
FUNCTION(unhex, 2, 0, 0, unhexFunc ),
+ FUNCTION(concat, -1, 0, 0, concatFunc ),
+ FUNCTION(concat, 0, 0, 0, 0 ),
+ FUNCTION(concat_ws, -1, 0, 0, concatwsFunc ),
+ FUNCTION(concat_ws, 0, 0, 0, 0 ),
+ FUNCTION(concat_ws, 1, 0, 0, 0 ),
INLINE_FUNC(ifnull, 2, INLINEFUNC_coalesce, 0 ),
VFUNCTION(random, 0, 0, 0, randomFunc ),
VFUNCTION(randomblob, 1, 0, 0, randomBlob ),
@@ -127855,6 +130519,8 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep,
groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
+ WAGGREGATE(string_agg, 2, 0, 0, groupConcatStep,
+ groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
#ifdef SQLITE_CASE_SENSITIVE_LIKE
@@ -128797,6 +131463,7 @@ static int isSetNullAction(Parse *pParse, FKey *pFKey){
if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull)
|| (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull)
){
+ assert( (pTop->db->flags & SQLITE_FkNoAction)==0 );
return 1;
}
}
@@ -128991,6 +131658,8 @@ SQLITE_PRIVATE void sqlite3FkCheck(
}
if( regOld!=0 ){
int eAction = pFKey->aAction[aChange!=0];
+ if( (db->flags & SQLITE_FkNoAction) ) eAction = OE_None;
+
fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1);
/* If this is a deferred FK constraint, or a CASCADE or SET NULL
** action applies, then any foreign key violations caused by
@@ -129106,7 +131775,11 @@ SQLITE_PRIVATE int sqlite3FkRequired(
/* Check if any parent key columns are being modified. */
for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
if( fkParentIsModified(pTab, p, aChange, chngRowid) ){
- if( p->aAction[1]!=OE_None ) return 2;
+ if( (pParse->db->flags & SQLITE_FkNoAction)==0
+ && p->aAction[1]!=OE_None
+ ){
+ return 2;
+ }
bHaveFK = 1;
}
}
@@ -129156,6 +131829,7 @@ static Trigger *fkActionTrigger(
int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */
action = pFKey->aAction[iAction];
+ if( (db->flags & SQLITE_FkNoAction) ) action = OE_None;
if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){
return 0;
}
@@ -129387,9 +132061,8 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){
if( pFKey->pPrevTo ){
pFKey->pPrevTo->pNextTo = pFKey->pNextTo;
}else{
- void *p = (void *)pFKey->pNextTo;
- const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo);
- sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p);
+ const char *z = (pFKey->pNextTo ? pFKey->pNextTo->zTo : pFKey->zTo);
+ sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, pFKey->pNextTo);
}
if( pFKey->pNextTo ){
pFKey->pNextTo->pPrevTo = pFKey->pPrevTo;
@@ -129452,8 +132125,10 @@ SQLITE_PRIVATE void sqlite3OpenTable(
assert( pParse->pVdbe!=0 );
v = pParse->pVdbe;
assert( opcode==OP_OpenWrite || opcode==OP_OpenRead );
- sqlite3TableLock(pParse, iDb, pTab->tnum,
- (opcode==OP_OpenWrite)?1:0, pTab->zName);
+ if( !pParse->db->noSharedCache ){
+ sqlite3TableLock(pParse, iDb, pTab->tnum,
+ (opcode==OP_OpenWrite)?1:0, pTab->zName);
+ }
if( HasRowid(pTab) ){
sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nNVCol);
VdbeComment((v, "%s", pTab->zName));
@@ -129582,7 +132257,7 @@ SQLITE_PRIVATE char *sqlite3TableAffinityStr(sqlite3 *db, const Table *pTab){
** For STRICT tables:
** ------------------
**
-** Generate an appropropriate OP_TypeCheck opcode that will verify the
+** Generate an appropriate OP_TypeCheck opcode that will verify the
** datatypes against the column definitions in pTab. If iReg==0, that
** means an OP_MakeRecord opcode has already been generated and should be
** the last opcode generated. The new OP_TypeCheck needs to be inserted
@@ -130874,7 +133549,7 @@ insert_cleanup:
/* This is the Walker callback from sqlite3ExprReferencesUpdatedColumn().
* Set bit 0x01 of pWalker->eCode if pWalker->eCode to 0 and if this
** expression node references any of the
-** columns that are being modifed by an UPDATE statement.
+** columns that are being modified by an UPDATE statement.
*/
static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){
if( pExpr->op==TK_COLUMN ){
@@ -131097,7 +133772,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int *aiChng, /* column i is unchanged if aiChng[i]<0 */
Upsert *pUpsert /* ON CONFLICT clauses, if any. NULL otherwise */
){
- Vdbe *v; /* VDBE under constrution */
+ Vdbe *v; /* VDBE under construction */
Index *pIdx; /* Pointer to one of the indices */
Index *pPk = 0; /* The PRIMARY KEY index for WITHOUT ROWID tables */
sqlite3 *db; /* Database connection */
@@ -131580,7 +134255,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
pIdx;
pIdx = indexIteratorNext(&sIdxIter, &ix)
){
- int regIdx; /* Range of registers hold conent for pIdx */
+ int regIdx; /* Range of registers holding content for pIdx */
int regR; /* Range of registers holding conflicting PK */
int iThisCur; /* Cursor for this UNIQUE index */
int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */
@@ -132075,6 +134750,8 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
assert( op==OP_OpenRead || op==OP_OpenWrite );
assert( op==OP_OpenWrite || p5==0 );
+ assert( piDataCur!=0 );
+ assert( piIdxCur!=0 );
if( IsVirtual(pTab) ){
/* This routine is a no-op for virtual tables. Leave the output
** variables *piDataCur and *piIdxCur set to illegal cursor numbers
@@ -132087,18 +134764,18 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
assert( v!=0 );
if( iBase<0 ) iBase = pParse->nTab;
iDataCur = iBase++;
- if( piDataCur ) *piDataCur = iDataCur;
+ *piDataCur = iDataCur;
if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){
sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op);
- }else{
+ }else if( pParse->db->noSharedCache==0 ){
sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName);
}
- if( piIdxCur ) *piIdxCur = iBase;
+ *piIdxCur = iBase;
for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
int iIdxCur = iBase++;
assert( pIdx->pSchema==pTab->pSchema );
if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
- if( piDataCur ) *piDataCur = iIdxCur;
+ *piDataCur = iIdxCur;
p5 = 0;
}
if( aToOpen==0 || aToOpen[i+1] ){
@@ -132396,7 +135073,7 @@ static int xferOptimization(
}
#endif
#ifndef SQLITE_OMIT_FOREIGN_KEY
- /* Disallow the transfer optimization if the destination table constains
+ /* Disallow the transfer optimization if the destination table contains
** any foreign key constraints. This is more restrictive than necessary.
** But the main beneficiary of the transfer optimization is the VACUUM
** command, and the VACUUM command disables foreign key constraints. So
@@ -133106,6 +135783,11 @@ struct sqlite3_api_routines {
int (*value_encoding)(sqlite3_value*);
/* Version 3.41.0 and later */
int (*is_interrupted)(sqlite3*);
+ /* Version 3.43.0 and later */
+ int (*stmt_explain)(sqlite3_stmt*,int);
+ /* Version 3.44.0 and later */
+ void *(*get_clientdata)(sqlite3*,const char*);
+ int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*));
};
/*
@@ -133434,6 +136116,11 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_value_encoding sqlite3_api->value_encoding
/* Version 3.41.0 and later */
#define sqlite3_is_interrupted sqlite3_api->is_interrupted
+/* Version 3.43.0 and later */
+#define sqlite3_stmt_explain sqlite3_api->stmt_explain
+/* Version 3.44.0 and later */
+#define sqlite3_get_clientdata sqlite3_api->get_clientdata
+#define sqlite3_set_clientdata sqlite3_api->set_clientdata
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
@@ -133950,7 +136637,12 @@ static const sqlite3_api_routines sqlite3Apis = {
/* Version 3.40.0 and later */
sqlite3_value_encoding,
/* Version 3.41.0 and later */
- sqlite3_is_interrupted
+ sqlite3_is_interrupted,
+ /* Version 3.43.0 and later */
+ sqlite3_stmt_explain,
+ /* Version 3.44.0 and later */
+ sqlite3_get_clientdata,
+ sqlite3_set_clientdata
};
/* True if x is the directory separator character
@@ -134030,6 +136722,10 @@ static int sqlite3LoadExtension(
*/
if( nMsg>SQLITE_MAX_PATHLEN ) goto extension_not_found;
+ /* Do not allow sqlite3_load_extension() to link to a copy of the
+ ** running application, by passing in an empty filename. */
+ if( nMsg==0 ) goto extension_not_found;
+
handle = sqlite3OsDlOpen(pVfs, zFile);
#if SQLITE_OS_UNIX || SQLITE_OS_WIN
for(ii=0; ii<ArraySize(azEndings) && handle==0; ii++){
@@ -134162,6 +136858,9 @@ SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){
** default so as not to open security holes in older applications.
*/
SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(db->mutex);
if( onoff ){
db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc;
@@ -134211,6 +136910,9 @@ SQLITE_API int sqlite3_auto_extension(
void (*xInit)(void)
){
int rc = SQLITE_OK;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( xInit==0 ) return SQLITE_MISUSE_BKPT;
+#endif
#ifndef SQLITE_OMIT_AUTOINIT
rc = sqlite3_initialize();
if( rc ){
@@ -134263,6 +136965,9 @@ SQLITE_API int sqlite3_cancel_auto_extension(
int i;
int n = 0;
wsdAutoextInit;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( xInit==0 ) return 0;
+#endif
sqlite3_mutex_enter(mutex);
for(i=(int)wsdAutoext.nExt-1; i>=0; i--){
if( wsdAutoext.aExt[i]==xInit ){
@@ -135862,7 +138567,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
**
** The first form reports the current local setting for the
** page cache spill size. The second form turns cache spill on
- ** or off. When turnning cache spill on, the size is set to the
+ ** or off. When turning cache spill on, the size is set to the
** current cache_size. The third form sets a spill size that
** may be different form the cache size.
** If N is positive then that is the
@@ -136132,7 +138837,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
#endif
if( sqlite3GetBoolean(zRight, 0) ){
- db->flags |= mask;
+ if( (mask & SQLITE_WriteSchema)==0
+ || (db->flags & SQLITE_Defensive)==0
+ ){
+ db->flags |= mask;
+ }
}else{
db->flags &= ~mask;
if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0;
@@ -136640,9 +139349,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
** The "quick_check" is reduced version of
** integrity_check designed to detect most database corruption
** without the overhead of cross-checking indexes. Quick_check
- ** is linear time wherease integrity_check is O(NlogN).
+ ** is linear time whereas integrity_check is O(NlogN).
**
- ** The maximum nubmer of errors is 100 by default. A different default
+ ** The maximum number of errors is 100 by default. A different default
** can be specified using a numeric parameter N.
**
** Or, the parameter N can be the name of a table. In that case, only
@@ -136765,8 +139474,32 @@ SQLITE_PRIVATE void sqlite3Pragma(
int r2; /* Previous key for WITHOUT ROWID tables */
int mxCol; /* Maximum non-virtual column number */
- if( !IsOrdinaryTable(pTab) ) continue;
if( pObjTab && pObjTab!=pTab ) continue;
+ if( !IsOrdinaryTable(pTab) ){
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ sqlite3_vtab *pVTab;
+ int a1;
+ if( !IsVirtual(pTab) ) continue;
+ if( pTab->nCol<=0 ){
+ const char *zMod = pTab->u.vtab.azArg[0];
+ if( sqlite3HashFind(&db->aModule, zMod)==0 ) continue;
+ }
+ sqlite3ViewGetColumnNames(pParse, pTab);
+ if( pTab->u.vtab.p==0 ) continue;
+ pVTab = pTab->u.vtab.p->pVtab;
+ if( NEVER(pVTab==0) ) continue;
+ if( NEVER(pVTab->pModule==0) ) continue;
+ if( pVTab->pModule->iVersion<4 ) continue;
+ if( pVTab->pModule->xIntegrity==0 ) continue;
+ sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick);
+ pTab->nTabRef++;
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF);
+ a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v);
+ integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, a1);
+#endif
+ continue;
+ }
if( isQuick || HasRowid(pTab) ){
pPk = 0;
r2 = 0;
@@ -137400,7 +140133,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
Schema *pSchema; /* The current schema */
Table *pTab; /* A table in the schema */
Index *pIdx; /* An index of the table */
- LogEst szThreshold; /* Size threshold above which reanalysis is needd */
+ LogEst szThreshold; /* Size threshold above which reanalysis needed */
char *zSubSql; /* SQL statement for the OP_SqlExec opcode */
u32 opMask; /* Mask of operations to perform */
@@ -137892,7 +140625,8 @@ static const sqlite3_module pragmaVtabModule = {
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
/*
@@ -138516,8 +141250,6 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){
db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue;
assert( pParse->db->pParse==pParse );
db->pParse = pParse->pOuterParse;
- pParse->db = 0;
- pParse->disableLookaside = 0;
}
/*
@@ -138526,7 +141258,7 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){
** immediately.
**
** Use this mechanism for uncommon cleanups. There is a higher setup
-** cost for this mechansim (an extra malloc), so it should not be used
+** cost for this mechanism (an extra malloc), so it should not be used
** for common cleanups that happen on most calls. But for less
** common cleanups, we save a single NULL-pointer comparison in
** sqlite3ParseObjectReset(), which reduces the total CPU cycle count.
@@ -138618,7 +141350,12 @@ static int sqlite3Prepare(
sParse.pOuterParse = db->pParse;
db->pParse = &sParse;
sParse.db = db;
- sParse.pReprepare = pReprepare;
+ if( pReprepare ){
+ sParse.pReprepare = pReprepare;
+ sParse.explain = sqlite3_stmt_isexplain((sqlite3_stmt*)pReprepare);
+ }else{
+ assert( sParse.pReprepare==0 );
+ }
assert( ppStmt && *ppStmt==0 );
if( db->mallocFailed ){
sqlite3ErrorMsg(&sParse, "out of memory");
@@ -138783,6 +141520,7 @@ static int sqlite3LockAndPrepare(
assert( (rc&db->errMask)==rc );
db->busyHandler.nBusy = 0;
sqlite3_mutex_leave(db->mutex);
+ assert( rc==SQLITE_OK || (*ppStmt)==0 );
return rc;
}
@@ -139180,6 +141918,9 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){
if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1);
}
+SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3 *db, void *p){
+ if( ALWAYS(p) ) clearSelect(db, (Select*)p, 1);
+}
/*
** Return a pointer to the right-most SELECT statement in a compound.
@@ -139228,7 +141969,7 @@ static Select *findRightmost(Select *p){
** NATURAL FULL OUTER JT_NATRUAL|JT_LEFT|JT_RIGHT
**
** To preserve historical compatibly, SQLite also accepts a variety
-** of other non-standard and in many cases non-sensical join types.
+** of other non-standard and in many cases nonsensical join types.
** This routine makes as much sense at it can from the nonsense join
** type and returns a result. Examples of accepted nonsense join types
** include but are not limited to:
@@ -139450,6 +142191,7 @@ static void unsetJoinExpr(Expr *p, int iTable, int nullable){
}
if( p->op==TK_FUNCTION ){
assert( ExprUseXList(p) );
+ assert( p->pLeft==0 );
if( p->x.pList ){
int i;
for(i=0; i<p->x.pList->nExpr; i++){
@@ -139499,7 +142241,7 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){
if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue;
joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? EP_OuterON : EP_InnerON;
- /* If this is a NATURAL join, synthesize an approprate USING clause
+ /* If this is a NATURAL join, synthesize an appropriate USING clause
** to specify which columns should be joined.
*/
if( pRight->fg.jointype & JT_NATURAL ){
@@ -139715,7 +142457,7 @@ static void pushOntoSorter(
** (3) Some output columns are omitted from the sort record due to
** the SQLITE_ENABLE_SORTER_REFERENCES optimization, or due to the
** SQLITE_ECEL_OMITREF optimization, or due to the
- ** SortCtx.pDeferredRowLoad optimiation. In any of these cases
+ ** SortCtx.pDeferredRowLoad optimization. In any of these cases
** regOrigData is 0 to prevent this routine from trying to copy
** values that might not yet exist.
*/
@@ -139771,7 +142513,7 @@ static void pushOntoSorter(
testcase( pKI->nAllField > pKI->nKeyField+2 );
pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat,
pKI->nAllField-pKI->nKeyField-1);
- pOp = 0; /* Ensure pOp not used after sqltie3VdbeAddOp3() */
+ pOp = 0; /* Ensure pOp not used after sqlite3VdbeAddOp3() */
addrJmp = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v);
pSort->labelBkOut = sqlite3VdbeMakeLabel(pParse);
@@ -139865,7 +142607,7 @@ static void codeOffset(
** The returned value in this case is a copy of parameter iTab.
**
** WHERE_DISTINCT_ORDERED:
-** In this case rows are being delivered sorted order. The ephermal
+** In this case rows are being delivered sorted order. The ephemeral
** table is not required. Instead, the current set of values
** is compared against previous row. If they match, the new row
** is not distinct and control jumps to VM address addrRepeat. Otherwise,
@@ -140294,6 +143036,16 @@ static void selectInnerLoop(
testcase( eDest==SRT_Fifo );
testcase( eDest==SRT_DistFifo );
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
+#if !defined(SQLITE_ENABLE_NULL_TRIM) && defined(SQLITE_DEBUG)
+ /* A destination of SRT_Table and a non-zero iSDParm2 parameter means
+ ** that this is an "UPDATE ... FROM" on a virtual table or view. In this
+ ** case set the p5 parameter of the OP_MakeRecord to OPFLAG_NOCHNG_MAGIC.
+ ** This does not affect operation in any way - it just allows MakeRecord
+ ** to process OPFLAG_NOCHANGE values without an assert() failing. */
+ if( eDest==SRT_Table && pDest->iSDParm2 ){
+ sqlite3VdbeChangeP5(v, OPFLAG_NOCHNG_MAGIC);
+ }
+#endif
#ifndef SQLITE_OMIT_CTE
if( eDest==SRT_DistFifo ){
/* If the destination is DistFifo, then cursor (iParm+1) is open
@@ -141097,13 +143849,6 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames(
int fullName; /* TABLE.COLUMN if no AS clause and is a direct table ref */
int srcName; /* COLUMN or TABLE.COLUMN if no AS clause and is direct */
-#ifndef SQLITE_OMIT_EXPLAIN
- /* If this is an EXPLAIN, skip this step */
- if( pParse->explain ){
- return;
- }
-#endif
-
if( pParse->colNamesSet ) return;
/* Column names are determined by the left-most term of a compound select */
while( pSelect->pPrior ) pSelect = pSelect->pPrior;
@@ -141290,7 +144035,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
** kind (maybe a parenthesized subquery in the FROM clause of a larger
** query, or a VIEW, or a CTE). This routine computes type information
** for that Table object based on the Select object that implements the
-** subquery. For the purposes of this routine, "type infomation" means:
+** subquery. For the purposes of this routine, "type information" means:
**
** * The datatype name, as it might appear in a CREATE TABLE statement
** * Which collating sequence to use for the column
@@ -141311,7 +144056,8 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(
NameContext sNC;
assert( pSelect!=0 );
- assert( (pSelect->selFlags & SF_Resolved)!=0 );
+ testcase( (pSelect->selFlags & SF_Resolved)==0 );
+ assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT );
assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 );
assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB );
if( db->mallocFailed || IN_RENAME_OBJECT ) return;
@@ -141619,7 +144365,7 @@ static void generateWithRecursiveQuery(
int iQueue; /* The Queue table */
int iDistinct = 0; /* To ensure unique results if UNION */
int eDest = SRT_Fifo; /* How to write to Queue */
- SelectDest destQueue; /* SelectDest targetting the Queue table */
+ SelectDest destQueue; /* SelectDest targeting the Queue table */
int i; /* Loop counter */
int rc; /* Result code */
ExprList *pOrderBy; /* The ORDER BY clause */
@@ -142195,9 +144941,7 @@ multi_select_end:
pDest->iSdst = dest.iSdst;
pDest->nSdst = dest.nSdst;
if( pDelete ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3SelectDelete,
- pDelete);
+ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete);
}
return rc;
}
@@ -142219,7 +144963,7 @@ SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){
/*
** Code an output subroutine for a coroutine implementation of a
-** SELECT statment.
+** SELECT statement.
**
** The data to be output is contained in pIn->iSdst. There are
** pIn->nSdst columns to be output. pDest is where the output should
@@ -142441,7 +145185,7 @@ static int generateOutputSubroutine(
**
** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not
** actually called using Gosub and they do not Return. EofA and EofB loop
-** until all data is exhausted then jump to the "end" labe. AltB, AeqB,
+** until all data is exhausted then jump to the "end" label. AltB, AeqB,
** and AgtB jump to either L2 or to one of EofA or EofB.
*/
#ifndef SQLITE_OMIT_COMPOUND_SELECT
@@ -142478,7 +145222,7 @@ static int multiSelectOrderBy(
int savedOffset; /* Saved value of p->iOffset */
int labelCmpr; /* Label for the start of the merge algorithm */
int labelEnd; /* Label for the end of the overall SELECT stmt */
- int addr1; /* Jump instructions that get retargetted */
+ int addr1; /* Jump instructions that get retargeted */
int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */
KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */
KeyInfo *pKeyMerge; /* Comparison information for merging rows */
@@ -142748,8 +145492,7 @@ static int multiSelectOrderBy(
/* Make arrangements to free the 2nd and subsequent arms of the compound
** after the parse has finished */
if( pSplit->pPrior ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior);
+ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSplit->pPrior);
}
pSplit->pPrior = pPrior;
pPrior->pNext = pSplit;
@@ -142847,11 +145590,14 @@ static Expr *substExpr(
#endif
{
Expr *pNew;
- int iColumn = pExpr->iColumn;
- Expr *pCopy = pSubst->pEList->a[iColumn].pExpr;
+ int iColumn;
+ Expr *pCopy;
Expr ifNullRow;
+ iColumn = pExpr->iColumn;
+ assert( iColumn>=0 );
assert( pSubst->pEList!=0 && iColumn<pSubst->pEList->nExpr );
assert( pExpr->pRight==0 );
+ pCopy = pSubst->pEList->a[iColumn].pExpr;
if( sqlite3ExprIsVector(pCopy) ){
sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
}else{
@@ -143200,7 +145946,7 @@ static int compoundHasDifferentAffinities(Select *p){
** (9) If the subquery uses LIMIT then the outer query may not be aggregate.
**
** (**) Restriction (10) was removed from the code on 2005-02-05 but we
-** accidently carried the comment forward until 2014-09-15. Original
+** accidentally carried the comment forward until 2014-09-15. Original
** constraint: "If the subquery is aggregate then the outer query
** may not use LIMIT."
**
@@ -143292,7 +146038,8 @@ static int compoundHasDifferentAffinities(Select *p){
** (27b) the subquery is a compound query and the RIGHT JOIN occurs
** in any arm of the compound query. (See also (17g).)
**
-** (28) The subquery is not a MATERIALIZED CTE.
+** (28) The subquery is not a MATERIALIZED CTE. (This is handled
+** in the caller before ever reaching this routine.)
**
**
** In this routine, the "p" parameter is a pointer to the outer query.
@@ -143402,9 +146149,9 @@ static int flattenSubquery(
if( iFrom>0 && (pSubSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
return 0; /* Restriction (27a) */
}
- if( pSubitem->fg.isCte && pSubitem->u2.pCteUse->eM10d==M10d_Yes ){
- return 0; /* (28) */
- }
+
+ /* Condition (28) is blocked by the caller */
+ assert( !pSubitem->fg.isCte || pSubitem->u2.pCteUse->eM10d!=M10d_Yes );
/* Restriction (17): If the sub-query is a compound SELECT, then it must
** use only the UNION ALL operator. And none of the simple select queries
@@ -143474,7 +146221,7 @@ static int flattenSubquery(
testcase( i==SQLITE_DENY );
pParse->zAuthContext = zSavedAuthContext;
- /* Delete the transient structures associated with thesubquery */
+ /* Delete the transient structures associated with the subquery */
pSub1 = pSubitem->pSelect;
sqlite3DbFree(db, pSubitem->zDatabase);
sqlite3DbFree(db, pSubitem->zName);
@@ -143566,9 +146313,7 @@ static int flattenSubquery(
Table *pTabToDel = pSubitem->pTab;
if( pTabToDel->nTabRef==1 ){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
- sqlite3ParserAddCleanup(pToplevel,
- (void(*)(sqlite3*,void*))sqlite3DeleteTable,
- pTabToDel);
+ sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel);
testcase( pToplevel->earlyCleanup );
}else{
pTabToDel->nTabRef--;
@@ -143656,7 +146401,7 @@ static int flattenSubquery(
** ORDER BY column expression is identical to the iOrderByCol'th
** expression returned by SELECT statement pSub. Since these values
** do not necessarily correspond to columns in SELECT statement pParent,
- ** zero them before transfering the ORDER BY clause.
+ ** zero them before transferring the ORDER BY clause.
**
** Not doing this may cause an error if a subsequent call to this
** function attempts to flatten a compound sub-query into pParent
@@ -143716,8 +146461,7 @@ static int flattenSubquery(
}
}
- /* Finially, delete what is left of the subquery and return
- ** success.
+ /* Finally, delete what is left of the subquery and return success.
*/
sqlite3AggInfoPersistWalkerInit(&w, pParse);
sqlite3WalkSelect(&w,pSub1);
@@ -143752,7 +146496,7 @@ struct WhereConst {
/*
** Add a new entry to the pConst object. Except, do not add duplicate
-** pColumn entires. Also, do not add if doing so would not be appropriate.
+** pColumn entries. Also, do not add if doing so would not be appropriate.
**
** The caller guarantees the pColumn is a column and pValue is a constant.
** This routine has to do some additional checks before completing the
@@ -143938,7 +146682,7 @@ static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){
** SELECT * FROM t1 WHERE a=123 AND b=123;
**
** The two SELECT statements above should return different answers. b=a
-** is alway true because the comparison uses numeric affinity, but b=123
+** is always true because the comparison uses numeric affinity, but b=123
** is false because it uses text affinity and '0123' is not the same as '123'.
** To work around this, the expression tree is not actually changed from
** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol
@@ -144022,7 +146766,7 @@ static int propagateConstants(
** At the time this function is called it is guaranteed that
**
** * the sub-query uses only one distinct window frame, and
-** * that the window frame has a PARTITION BY clase.
+** * that the window frame has a PARTITION BY clause.
*/
static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){
assert( pSubq->pWin->pPartition );
@@ -144291,12 +147035,12 @@ static int disableUnusedSubqueryResultColumns(SrcItem *pItem){
assert( pItem->pSelect!=0 );
pSub = pItem->pSelect;
assert( pSub->pEList->nExpr==pTab->nCol );
- if( (pSub->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){
- testcase( pSub->selFlags & SF_Distinct );
- testcase( pSub->selFlags & SF_Aggregate );
- return 0;
- }
for(pX=pSub; pX; pX=pX->pPrior){
+ if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){
+ testcase( pX->selFlags & SF_Distinct );
+ testcase( pX->selFlags & SF_Aggregate );
+ return 0;
+ }
if( pX->pPrior && pX->op!=TK_ALL ){
/* This optimization does not work for compound subqueries that
** use UNION, INTERSECT, or EXCEPT. Only UNION ALL is allowed. */
@@ -144616,8 +147360,7 @@ static struct Cte *searchWith(
SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){
if( pWith ){
if( bFree ){
- pWith = (With*)sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3WithDelete,
+ pWith = (With*)sqlite3ParserAddCleanup(pParse, sqlite3WithDeleteGeneric,
pWith);
if( pWith==0 ) return 0;
}
@@ -145102,12 +147845,20 @@ static int selectExpander(Walker *pWalker, Select *p){
** expanded. */
int tableSeen = 0; /* Set to 1 when TABLE matches */
char *zTName = 0; /* text of name of TABLE */
+ int iErrOfst;
if( pE->op==TK_DOT ){
+ assert( (selFlags & SF_NestedFrom)==0 );
assert( pE->pLeft!=0 );
assert( !ExprHasProperty(pE->pLeft, EP_IntValue) );
zTName = pE->pLeft->u.zToken;
+ assert( ExprUseWOfst(pE->pLeft) );
+ iErrOfst = pE->pRight->w.iOfst;
+ }else{
+ assert( ExprUseWOfst(pE) );
+ iErrOfst = pE->w.iOfst;
}
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ int nAdd; /* Number of cols including rowid */
Table *pTab = pFrom->pTab; /* Table for this data source */
ExprList *pNestedFrom; /* Result-set of a nested FROM clause */
char *zTabName; /* AS name for this data source */
@@ -145125,6 +147876,7 @@ static int selectExpander(Walker *pWalker, Select *p){
pNestedFrom = pFrom->pSelect->pEList;
assert( pNestedFrom!=0 );
assert( pNestedFrom->nExpr==pTab->nCol );
+ assert( VisibleRowid(pTab)==0 );
}else{
if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
continue;
@@ -145142,6 +147894,7 @@ static int selectExpander(Walker *pWalker, Select *p){
for(ii=0; ii<pUsing->nId; ii++){
const char *zUName = pUsing->a[ii].zName;
pRight = sqlite3Expr(db, TK_ID, zUName);
+ sqlite3ExprSetErrorOffset(pRight, iErrOfst);
pNew = sqlite3ExprListAppend(pParse, pNew, pRight);
if( pNew ){
struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
@@ -145154,33 +147907,48 @@ static int selectExpander(Walker *pWalker, Select *p){
}else{
pUsing = 0;
}
- for(j=0; j<pTab->nCol; j++){
- char *zName = pTab->aCol[j].zCnName;
+
+ nAdd = pTab->nCol + (VisibleRowid(pTab) && (selFlags&SF_NestedFrom));
+ for(j=0; j<nAdd; j++){
+ const char *zName;
struct ExprList_item *pX; /* Newly added ExprList term */
- assert( zName );
- if( zTName
- && pNestedFrom
- && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0)==0
- ){
- continue;
- }
+ if( j==pTab->nCol ){
+ zName = sqlite3RowidAlias(pTab);
+ if( zName==0 ) continue;
+ }else{
+ zName = pTab->aCol[j].zCnName;
- /* If a column is marked as 'hidden', omit it from the expanded
- ** result-set list unless the SELECT has the SF_IncludeHidden
- ** bit set.
- */
- if( (p->selFlags & SF_IncludeHidden)==0
- && IsHiddenColumn(&pTab->aCol[j])
- ){
- continue;
- }
- if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
- && zTName==0
- && (selFlags & (SF_NestedFrom))==0
- ){
- continue;
+ /* If pTab is actually an SF_NestedFrom sub-select, do not
+ ** expand any ENAME_ROWID columns. */
+ if( pNestedFrom && pNestedFrom->a[j].fg.eEName==ENAME_ROWID ){
+ continue;
+ }
+
+ if( zTName
+ && pNestedFrom
+ && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0, 0)==0
+ ){
+ continue;
+ }
+
+ /* If a column is marked as 'hidden', omit it from the expanded
+ ** result-set list unless the SELECT has the SF_IncludeHidden
+ ** bit set.
+ */
+ if( (p->selFlags & SF_IncludeHidden)==0
+ && IsHiddenColumn(&pTab->aCol[j])
+ ){
+ continue;
+ }
+ if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
+ && zTName==0
+ && (selFlags & (SF_NestedFrom))==0
+ ){
+ continue;
+ }
}
+ assert( zName );
tableSeen = 1;
if( i>0 && zTName==0 && (selFlags & SF_NestedFrom)==0 ){
@@ -145214,6 +147982,7 @@ static int selectExpander(Walker *pWalker, Select *p){
}else{
pExpr = pRight;
}
+ sqlite3ExprSetErrorOffset(pExpr, iErrOfst);
pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
if( pNew==0 ){
break; /* OOM */
@@ -145229,11 +147998,11 @@ static int selectExpander(Walker *pWalker, Select *p){
zSchemaName, zTabName, zName);
testcase( pX->zEName==0 );
}
- pX->fg.eEName = ENAME_TAB;
+ pX->fg.eEName = (j==pTab->nCol ? ENAME_ROWID : ENAME_TAB);
if( (pFrom->fg.isUsing
&& sqlite3IdListIndex(pFrom->u3.pUsing, zName)>=0)
|| (pUsing && sqlite3IdListIndex(pUsing, zName)>=0)
- || (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
+ || (j<pTab->nCol && (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND))
){
pX->fg.bNoExpand = 1;
}
@@ -145335,10 +148104,11 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
SrcList *pTabList;
SrcItem *pFrom;
- assert( p->selFlags & SF_Resolved );
if( p->selFlags & SF_HasTypeInfo ) return;
p->selFlags |= SF_HasTypeInfo;
pParse = pWalker->pParse;
+ testcase( (p->selFlags & SF_Resolved)==0 );
+ assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT );
pTabList = p->pSrc;
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
Table *pTab = pFrom->pTab;
@@ -145454,8 +148224,14 @@ static void analyzeAggFuncArgs(
pNC->ncFlags |= NC_InAggFunc;
for(i=0; i<pAggInfo->nFunc; i++){
Expr *pExpr = pAggInfo->aFunc[i].pFExpr;
+ assert( pExpr->op==TK_FUNCTION || pExpr->op==TK_AGG_FUNCTION );
assert( ExprUseXList(pExpr) );
sqlite3ExprAnalyzeAggList(pNC, pExpr->x.pList);
+ if( pExpr->pLeft ){
+ assert( pExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pExpr->pLeft) );
+ sqlite3ExprAnalyzeAggList(pNC, pExpr->pLeft->x.pList);
+ }
#ifndef SQLITE_OMIT_WINDOWFUNC
assert( !IsWindowFunc(pExpr) );
if( ExprHasProperty(pExpr, EP_WinFunc) ){
@@ -145530,7 +148306,7 @@ static int aggregateIdxEprRefToColCallback(Walker *pWalker, Expr *pExpr){
pExpr->op = TK_AGG_COLUMN;
pExpr->iTable = pCol->iTable;
pExpr->iColumn = pCol->iColumn;
- ExprClearProperty(pExpr, EP_Skip|EP_Collate);
+ ExprClearProperty(pExpr, EP_Skip|EP_Collate|EP_Unlikely);
return WRC_Prune;
}
@@ -145561,7 +148337,7 @@ static void aggregateConvertIndexedExprRefToColumn(AggInfo *pAggInfo){
** * The aCol[] and aFunc[] arrays may be modified
** * The AggInfoColumnReg() and AggInfoFuncReg() macros may not be used
**
-** After clling this routine:
+** After calling this routine:
**
** * The aCol[] and aFunc[] arrays are fixed
** * The AggInfoColumnReg() and AggInfoFuncReg() macros may be used
@@ -145610,6 +148386,36 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
pFunc->pFunc->zName));
}
}
+ if( pFunc->iOBTab>=0 ){
+ ExprList *pOBList;
+ KeyInfo *pKeyInfo;
+ int nExtra = 0;
+ assert( pFunc->pFExpr->pLeft!=0 );
+ assert( pFunc->pFExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pFunc->pFExpr->pLeft) );
+ assert( pFunc->pFunc!=0 );
+ pOBList = pFunc->pFExpr->pLeft->x.pList;
+ if( !pFunc->bOBUnique ){
+ nExtra++; /* One extra column for the OP_Sequence */
+ }
+ if( pFunc->bOBPayload ){
+ /* extra columns for the function arguments */
+ assert( ExprUseXList(pFunc->pFExpr) );
+ nExtra += pFunc->pFExpr->x.pList->nExpr;
+ }
+ if( pFunc->bUseSubtype ){
+ nExtra += pFunc->pFExpr->x.pList->nExpr;
+ }
+ pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra);
+ if( !pFunc->bOBUnique && pParse->nErr==0 ){
+ pKeyInfo->nKeyField++;
+ }
+ sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
+ pFunc->iOBTab, pOBList->nExpr+nExtra, 0,
+ (char*)pKeyInfo, P4_KEYINFO);
+ ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s(ORDER BY)",
+ pFunc->pFunc->zName));
+ }
}
}
@@ -145625,13 +148431,56 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
pList = pF->pFExpr->x.pList;
+ if( pF->iOBTab>=0 ){
+ /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs
+ ** were stored in emphermal table pF->iOBTab. Here, we extract those
+ ** inputs (in ORDER BY order) and make all calls to OP_AggStep
+ ** before doing the OP_AggFinal call. */
+ int iTop; /* Start of loop for extracting columns */
+ int nArg; /* Number of columns to extract */
+ int nKey; /* Key columns to be skipped */
+ int regAgg; /* Extract into this array */
+ int j; /* Loop counter */
+
+ assert( pF->pFunc!=0 );
+ nArg = pList->nExpr;
+ regAgg = sqlite3GetTempRange(pParse, nArg);
+
+ if( pF->bOBPayload==0 ){
+ nKey = 0;
+ }else{
+ assert( pF->pFExpr->pLeft!=0 );
+ assert( ExprUseXList(pF->pFExpr->pLeft) );
+ assert( pF->pFExpr->pLeft->x.pList!=0 );
+ nKey = pF->pFExpr->pLeft->x.pList->nExpr;
+ if( ALWAYS(!pF->bOBUnique) ) nKey++;
+ }
+ iTop = sqlite3VdbeAddOp1(v, OP_Rewind, pF->iOBTab); VdbeCoverage(v);
+ for(j=nArg-1; j>=0; j--){
+ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j);
+ }
+ if( pF->bUseSubtype ){
+ int regSubtype = sqlite3GetTempReg(pParse);
+ int iBaseCol = nKey + nArg + (pF->bOBPayload==0 && pF->bOBUnique==0);
+ for(j=nArg-1; j>=0; j--){
+ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, iBaseCol+j, regSubtype);
+ sqlite3VdbeAddOp2(v, OP_SetSubtype, regSubtype, regAgg+j);
+ }
+ sqlite3ReleaseTempReg(pParse, regSubtype);
+ }
+ sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, iTop);
+ sqlite3ReleaseTempRange(pParse, regAgg, nArg);
+ }
sqlite3VdbeAddOp2(v, OP_AggFinal, AggInfoFuncReg(pAggInfo,i),
pList ? pList->nExpr : 0);
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
}
}
-
/*
** Generate code that will update the accumulator memory cells for an
** aggregate based on the current cursor position.
@@ -145640,6 +148489,13 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator
** registers if register regAcc contains 0. The caller will take care
** of setting and clearing regAcc.
+**
+** For an ORDER BY aggregate, the actual accumulator memory cell update
+** is deferred until after all input rows have been received, so that they
+** can be run in the requested order. In that case, instead of invoking
+** OP_AggStep to update the accumulator, just add the arguments that would
+** have been passed into OP_AggStep into the sorting ephemeral table
+** (along with the appropriate sort key).
*/
static void updateAccumulator(
Parse *pParse,
@@ -145661,9 +148517,12 @@ static void updateAccumulator(
int nArg;
int addrNext = 0;
int regAgg;
+ int regAggSz = 0;
+ int regDistinct = 0;
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
assert( !IsWindowFunc(pF->pFExpr) );
+ assert( pF->pFunc!=0 );
pList = pF->pFExpr->x.pList;
if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){
Expr *pFilter = pF->pFExpr->y.pWin->pFilter;
@@ -145687,9 +148546,55 @@ static void updateAccumulator(
addrNext = sqlite3VdbeMakeLabel(pParse);
sqlite3ExprIfFalse(pParse, pFilter, addrNext, SQLITE_JUMPIFNULL);
}
- if( pList ){
+ if( pF->iOBTab>=0 ){
+ /* Instead of invoking AggStep, we must push the arguments that would
+ ** have been passed to AggStep onto the sorting table. */
+ int jj; /* Registered used so far in building the record */
+ ExprList *pOBList; /* The ORDER BY clause */
+ assert( pList!=0 );
+ nArg = pList->nExpr;
+ assert( nArg>0 );
+ assert( pF->pFExpr->pLeft!=0 );
+ assert( pF->pFExpr->pLeft->op==TK_ORDER );
+ assert( ExprUseXList(pF->pFExpr->pLeft) );
+ pOBList = pF->pFExpr->pLeft->x.pList;
+ assert( pOBList!=0 );
+ assert( pOBList->nExpr>0 );
+ regAggSz = pOBList->nExpr;
+ if( !pF->bOBUnique ){
+ regAggSz++; /* One register for OP_Sequence */
+ }
+ if( pF->bOBPayload ){
+ regAggSz += nArg;
+ }
+ if( pF->bUseSubtype ){
+ regAggSz += nArg;
+ }
+ regAggSz++; /* One extra register to hold result of MakeRecord */
+ regAgg = sqlite3GetTempRange(pParse, regAggSz);
+ regDistinct = regAgg;
+ sqlite3ExprCodeExprList(pParse, pOBList, regAgg, 0, SQLITE_ECEL_DUP);
+ jj = pOBList->nExpr;
+ if( !pF->bOBUnique ){
+ sqlite3VdbeAddOp2(v, OP_Sequence, pF->iOBTab, regAgg+jj);
+ jj++;
+ }
+ if( pF->bOBPayload ){
+ regDistinct = regAgg+jj;
+ sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP);
+ jj += nArg;
+ }
+ if( pF->bUseSubtype ){
+ int kk;
+ int regBase = pF->bOBPayload ? regDistinct : regAgg;
+ for(kk=0; kk<nArg; kk++, jj++){
+ sqlite3VdbeAddOp2(v, OP_GetSubtype, regBase+kk, regAgg+jj);
+ }
+ }
+ }else if( pList ){
nArg = pList->nExpr;
regAgg = sqlite3GetTempRange(pParse, nArg);
+ regDistinct = regAgg;
sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP);
}else{
nArg = 0;
@@ -145700,26 +148605,37 @@ static void updateAccumulator(
addrNext = sqlite3VdbeMakeLabel(pParse);
}
pF->iDistinct = codeDistinct(pParse, eDistinctType,
- pF->iDistinct, addrNext, pList, regAgg);
- }
- if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
- CollSeq *pColl = 0;
- struct ExprList_item *pItem;
- int j;
- assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
- for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
- pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
- }
- if( !pColl ){
- pColl = pParse->db->pDfltColl;
+ pF->iDistinct, addrNext, pList, regDistinct);
+ }
+ if( pF->iOBTab>=0 ){
+ /* Insert a new record into the ORDER BY table */
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, regAgg, regAggSz-1,
+ regAgg+regAggSz-1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pF->iOBTab, regAgg+regAggSz-1,
+ regAgg, regAggSz-1);
+ sqlite3ReleaseTempRange(pParse, regAgg, regAggSz);
+ }else{
+ /* Invoke the AggStep function */
+ if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ CollSeq *pColl = 0;
+ struct ExprList_item *pItem;
+ int j;
+ assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
+ for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
+ pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
+ }
+ if( !pColl ){
+ pColl = pParse->db->pDfltColl;
+ }
+ if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
+ sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0,
+ (char *)pColl, P4_COLLSEQ);
}
- if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
- sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
+ sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3ReleaseTempRange(pParse, regAgg, nArg);
}
- sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
- sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
- sqlite3VdbeChangeP5(v, (u8)nArg);
- sqlite3ReleaseTempRange(pParse, regAgg, nArg);
if( addrNext ){
sqlite3VdbeResolveLabel(v, addrNext);
}
@@ -145878,7 +148794,8 @@ static SrcItem *isSelfJoinView(
/*
** Deallocate a single AggInfo object
*/
-static void agginfoFree(sqlite3 *db, AggInfo *p){
+static void agginfoFree(sqlite3 *db, void *pArg){
+ AggInfo *p = (AggInfo*)pArg;
sqlite3DbFree(db, p->aCol);
sqlite3DbFree(db, p->aFunc);
sqlite3DbFreeNN(db, p);
@@ -145952,7 +148869,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
pSub->selFlags |= SF_Aggregate;
pSub->selFlags &= ~SF_Compound;
pSub->nSelectRow = 0;
- sqlite3ExprListDelete(db, pSub->pEList);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList);
pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount;
pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm);
pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
@@ -146132,9 +149049,8 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY");
}
#endif
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- p->pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric,
+ p->pOrderBy);
testcase( pParse->earlyCleanup );
p->pOrderBy = 0;
}
@@ -146215,22 +149131,58 @@ SQLITE_PRIVATE int sqlite3Select(
** to a real table */
assert( pTab!=0 );
- /* Convert LEFT JOIN into JOIN if there are terms of the right table
- ** of the LEFT JOIN used in the WHERE clause.
+ /* Try to simplify joins:
+ **
+ ** LEFT JOIN -> JOIN
+ ** RIGHT JOIN -> JOIN
+ ** FULL JOIN -> RIGHT JOIN
+ **
+ ** If terms of the i-th table are used in the WHERE clause in such a
+ ** way that the i-th table cannot be the NULL row of a join, then
+ ** perform the appropriate simplification. This is called
+ ** "OUTER JOIN strength reduction" in the SQLite documentation.
*/
- if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==JT_LEFT
- && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor)
+ if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0
+ && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor,
+ pItem->fg.jointype & JT_LTORJ)
&& OptimizationEnabled(db, SQLITE_SimplifyJoin)
){
- TREETRACE(0x1000,pParse,p,
- ("LEFT-JOIN simplifies to JOIN on term %d\n",i));
- pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
- assert( pItem->iCursor>=0 );
- unsetJoinExpr(p->pWhere, pItem->iCursor,
- pTabList->a[0].fg.jointype & JT_LTORJ);
+ if( pItem->fg.jointype & JT_LEFT ){
+ if( pItem->fg.jointype & JT_RIGHT ){
+ TREETRACE(0x1000,pParse,p,
+ ("FULL-JOIN simplifies to RIGHT-JOIN on term %d\n",i));
+ pItem->fg.jointype &= ~JT_LEFT;
+ }else{
+ TREETRACE(0x1000,pParse,p,
+ ("LEFT-JOIN simplifies to JOIN on term %d\n",i));
+ pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
+ unsetJoinExpr(p->pWhere, pItem->iCursor, 0);
+ }
+ }
+ if( pItem->fg.jointype & JT_LTORJ ){
+ for(j=i+1; j<pTabList->nSrc; j++){
+ SrcItem *pI2 = &pTabList->a[j];
+ if( pI2->fg.jointype & JT_RIGHT ){
+ if( pI2->fg.jointype & JT_LEFT ){
+ TREETRACE(0x1000,pParse,p,
+ ("FULL-JOIN simplifies to LEFT-JOIN on term %d\n",j));
+ pI2->fg.jointype &= ~JT_RIGHT;
+ }else{
+ TREETRACE(0x1000,pParse,p,
+ ("RIGHT-JOIN simplifies to JOIN on term %d\n",j));
+ pI2->fg.jointype &= ~(JT_RIGHT|JT_OUTER);
+ unsetJoinExpr(p->pWhere, pI2->iCursor, 1);
+ }
+ }
+ }
+ for(j=pTabList->nSrc-1; j>=0; j--){
+ pTabList->a[j].fg.jointype &= ~JT_LTORJ;
+ if( pTabList->a[j].fg.jointype & JT_RIGHT ) break;
+ }
+ }
}
- /* No futher action if this term of the FROM clause is not a subquery */
+ /* No further action if this term of the FROM clause is not a subquery */
if( pSub==0 ) continue;
/* Catch mismatch in the declared columns of a view and the number of
@@ -146241,6 +149193,14 @@ SQLITE_PRIVATE int sqlite3Select(
goto select_end;
}
+ /* Do not attempt the usual optimizations (flattening and ORDER BY
+ ** elimination) on a MATERIALIZED common table expression because
+ ** a MATERIALIZED common table expression is an optimization fence.
+ */
+ if( pItem->fg.isCte && pItem->u2.pCteUse->eM10d==M10d_Yes ){
+ continue;
+ }
+
/* Do not try to flatten an aggregate subquery.
**
** Flattening an aggregate subquery is only possible if the outer query
@@ -146270,6 +149230,8 @@ SQLITE_PRIVATE int sqlite3Select(
** (a) The outer query has a different ORDER BY clause
** (b) The subquery is part of a join
** See forum post 062d576715d277c8
+ **
+ ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled.
*/
if( pSub->pOrderBy!=0
&& (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */
@@ -146280,9 +149242,8 @@ SQLITE_PRIVATE int sqlite3Select(
){
TREETRACE(0x800,pParse,p,
("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1));
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
- pSub->pOrderBy);
+ sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric,
+ pSub->pOrderBy);
pSub->pOrderBy = 0;
}
@@ -146484,7 +149445,7 @@ SQLITE_PRIVATE int sqlite3Select(
}else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){
/* This is a CTE for which materialization code has already been
** generated. Invoke the subroutine to compute the materialization,
- ** the make the pItem->iCursor be a copy of the ephemerial table that
+ ** the make the pItem->iCursor be a copy of the ephemeral table that
** holds the result of the materialization. */
CteUse *pCteUse = pItem->u2.pCteUse;
sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e);
@@ -146811,8 +149772,7 @@ SQLITE_PRIVATE int sqlite3Select(
*/
pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) );
if( pAggInfo ){
- sqlite3ParserAddCleanup(pParse,
- (void(*)(sqlite3*,void*))agginfoFree, pAggInfo);
+ sqlite3ParserAddCleanup(pParse, agginfoFree, pAggInfo);
testcase( pParse->earlyCleanup );
}
if( db->mallocFailed ){
@@ -146867,7 +149827,7 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( pGroupBy ){
KeyInfo *pKeyInfo; /* Keying information for the group by clause */
- int addr1; /* A-vs-B comparision jump */
+ int addr1; /* A-vs-B comparison jump */
int addrOutputRow; /* Start of subroutine that outputs a result row */
int regOutputRow; /* Return address register for output subroutine */
int addrSetAbort; /* Set the abort flag and return */
@@ -146958,9 +149918,13 @@ SQLITE_PRIVATE int sqlite3Select(
int nCol;
int nGroupBy;
- explainTempTable(pParse,
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrExp; /* Address of OP_Explain instruction */
+#endif
+ ExplainQueryPlan2(addrExp, (pParse, 0, "USE TEMP B-TREE FOR %s",
(sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ?
- "DISTINCT" : "GROUP BY");
+ "DISTINCT" : "GROUP BY"
+ ));
groupBySort = 1;
nGroupBy = pGroupBy->nExpr;
@@ -146985,18 +149949,23 @@ SQLITE_PRIVATE int sqlite3Select(
}
pAggInfo->directMode = 0;
regRecord = sqlite3GetTempReg(pParse);
+ sqlite3VdbeScanStatusCounters(v, addrExp, 0, sqlite3VdbeCurrentAddr(v));
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord);
+ sqlite3VdbeScanStatusRange(v, addrExp, sqlite3VdbeCurrentAddr(v)-2, -1);
sqlite3ReleaseTempReg(pParse, regRecord);
sqlite3ReleaseTempRange(pParse, regBase, nCol);
TREETRACE(0x2,pParse,p,("WhereEnd\n"));
sqlite3WhereEnd(pWInfo);
pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++;
sortOut = sqlite3GetTempReg(pParse);
+ sqlite3VdbeScanStatusCounters(v, addrExp, sqlite3VdbeCurrentAddr(v), 0);
sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol);
sqlite3VdbeAddOp2(v, OP_SorterSort, pAggInfo->sortingIdx, addrEnd);
VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
pAggInfo->useSortingIdx = 1;
+ sqlite3VdbeScanStatusRange(v, addrExp, -1, sortPTab);
+ sqlite3VdbeScanStatusRange(v, addrExp, -1, pAggInfo->sortingIdx);
}
/* If there are entries in pAgggInfo->aFunc[] that contain subexpressions
@@ -147724,6 +150693,10 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables");
goto trigger_orphan_error;
}
+ if( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(db) ){
+ sqlite3ErrorMsg(pParse, "cannot create triggers on shadow tables");
+ goto trigger_orphan_error;
+ }
/* Check that the trigger name is not reserved and that no trigger of the
** specified name exists */
@@ -148507,10 +151480,17 @@ static void codeReturningTrigger(
SrcList sFrom;
assert( v!=0 );
- assert( pParse->bReturning );
+ if( !pParse->bReturning ){
+ /* This RETURNING trigger must be for a different statement as
+ ** this statement lacks a RETURNING clause. */
+ return;
+ }
assert( db->pParse==pParse );
pReturning = pParse->u1.pReturning;
- assert( pTrigger == &(pReturning->retTrig) );
+ if( pTrigger != &(pReturning->retTrig) ){
+ /* This RETURNING trigger is for a different statement */
+ return;
+ }
memset(&sSelect, 0, sizeof(sSelect));
memset(&sFrom, 0, sizeof(sFrom));
sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0);
@@ -149247,7 +152227,7 @@ static void updateFromSelect(
assert( pTabList->nSrc>1 );
if( pSrc ){
- pSrc->a[0].fg.notCte = 1;
+ assert( pSrc->a[0].fg.notCte );
pSrc->a[0].iCursor = -1;
pSrc->a[0].pTab->nTabRef--;
pSrc->a[0].pTab = 0;
@@ -149764,7 +152744,7 @@ SQLITE_PRIVATE void sqlite3Update(
&& !hasFK
&& !chngKey
&& !bReplace
- && (sNC.ncFlags & NC_Subquery)==0
+ && (pWhere==0 || !ExprHasProperty(pWhere, EP_Subquery))
){
flags |= WHERE_ONEPASS_MULTIROW;
}
@@ -149836,6 +152816,8 @@ SQLITE_PRIVATE void sqlite3Update(
if( !isView ){
int addrOnce = 0;
+ int iNotUsed1 = 0;
+ int iNotUsed2 = 0;
/* Open every index that needs updating. */
if( eOnePass!=ONEPASS_OFF ){
@@ -149847,7 +152829,7 @@ SQLITE_PRIVATE void sqlite3Update(
addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur,
- aToOpen, 0, 0);
+ aToOpen, &iNotUsed1, &iNotUsed2);
if( addrOnce ){
sqlite3VdbeJumpHereOrPopInst(v, addrOnce);
}
@@ -150138,8 +153120,10 @@ SQLITE_PRIVATE void sqlite3Update(
sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1);
}
- sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
- TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue);
+ if( pTrigger ){
+ sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
+ TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue);
+ }
/* Repeat the above with the next record to be updated, until
** all record selected by the WHERE clause have been updated.
@@ -150234,7 +153218,7 @@ static void updateVirtualTable(
int nArg = 2 + pTab->nCol; /* Number of arguments to VUpdate */
int regArg; /* First register in VUpdate arg array */
int regRec; /* Register in which to assemble record */
- int regRowid; /* Register for ephem table rowid */
+ int regRowid; /* Register for ephemeral table rowid */
int iCsr = pSrc->a[0].iCursor; /* Cursor used for virtual table scan */
int aDummy[2]; /* Unused arg for sqlite3WhereOkOnePass() */
int eOnePass; /* True to use onepass strategy */
@@ -150278,7 +153262,9 @@ static void updateVirtualTable(
sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0)
);
}else{
- pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
+ Expr *pRowExpr = exprRowColumn(pParse, i);
+ if( pRowExpr ) pRowExpr->op2 = OPFLAG_NOCHNG;
+ pList = sqlite3ExprListAppend(pParse, pList, pRowExpr);
}
}
@@ -150355,7 +153341,7 @@ static void updateVirtualTable(
sqlite3WhereEnd(pWInfo);
}
- /* Begin scannning through the ephemeral table. */
+ /* Begin scanning through the ephemeral table. */
addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v);
/* Extract arguments from the current row of the ephemeral table and
@@ -150563,7 +153549,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(
pExpr = &sCol[0];
}
for(jj=0; jj<nn; jj++){
- if( sqlite3ExprCompare(pParse,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){
+ if( sqlite3ExprCompare(0,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){
break; /* Column ii of the index matches column jj of target */
}
}
@@ -150912,7 +153898,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
** (possibly synchronous) transaction opened on the main database before
** sqlite3BtreeCopyFile() is called.
**
- ** An optimisation would be to use a non-journaled pager.
+ ** An optimization would be to use a non-journaled pager.
** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
** that actually made the VACUUM run slower. Very little journalling
** actually occurs when doing a vacuum since the vacuum_db is initially
@@ -151435,7 +154421,6 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){
if( p ){
db->pDisconnect = 0;
- sqlite3ExpirePreparedStatements(db, 0);
do {
VTable *pNext = p->pNext;
sqlite3VtabUnlock(p);
@@ -151601,7 +154586,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
** the information we've collected.
**
** The VM register number pParse->regRowid holds the rowid of an
- ** entry in the sqlite_schema table tht was created for this vtab
+ ** entry in the sqlite_schema table that was created for this vtab
** by sqlite3StartTable().
*/
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
@@ -151941,7 +154926,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
sqlite3_mutex_enter(db->mutex);
pCtx = db->pVtabCtx;
if( !pCtx || pCtx->bDeclared ){
- sqlite3Error(db, SQLITE_MISUSE);
+ sqlite3Error(db, SQLITE_MISUSE_BKPT);
sqlite3_mutex_leave(db->mutex);
return SQLITE_MISUSE_BKPT;
}
@@ -152345,7 +155330,7 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){
**
** An eponymous virtual table instance is one that is named after its
** module, and more importantly, does not require a CREATE VIRTUAL TABLE
-** statement in order to come into existance. Eponymous virtual table
+** statement in order to come into existence. Eponymous virtual table
** instances always exist. They cannot be DROP-ed.
**
** Any virtual table module for which xConnect and xCreate are the same
@@ -152536,7 +155521,7 @@ typedef struct WhereRightJoin WhereRightJoin;
/*
** This object is a header on a block of allocated memory that will be
-** automatically freed when its WInfo oject is destructed.
+** automatically freed when its WInfo object is destructed.
*/
struct WhereMemBlock {
WhereMemBlock *pNext; /* Next block in the chain */
@@ -152597,7 +155582,7 @@ struct WhereLevel {
int iCur; /* The VDBE cursor used by this IN operator */
int addrInTop; /* Top of the IN loop */
int iBase; /* Base register of multi-key index record */
- int nPrefix; /* Number of prior entires in the key */
+ int nPrefix; /* Number of prior entries in the key */
u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */
} *aInLoop; /* Information about each nested IN operator */
} in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */
@@ -152847,7 +155832,7 @@ struct WhereClause {
int nTerm; /* Number of terms */
int nSlot; /* Number of entries in a[] */
int nBase; /* Number of terms through the last non-Virtual */
- WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */
+ WhereTerm *a; /* Each a[] describes a term of the WHERE clause */
#if defined(SQLITE_SMALL_STACK)
WhereTerm aStatic[1]; /* Initial static space for a[] */
#else
@@ -153001,7 +155986,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int);
#ifdef WHERETRACE_ENABLED
SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC);
SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm);
-SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC);
+SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC);
#endif
SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
WhereClause *pWC, /* The WHERE clause to be searched */
@@ -153132,7 +156117,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*);
#define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */
#define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */
#define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */
-#define WHERE_VIEWSCAN 0x02000000 /* A full-scan of a VIEW or subquery */
+ /* 0x02000000 -- available for reuse */
#define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */
#endif /* !defined(SQLITE_WHEREINT_H) */
@@ -153435,6 +156420,12 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
if( wsFlags & WHERE_INDEXED ){
sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur);
}
+ }else{
+ int addr = pSrclist->a[pLvl->iFrom].addrFillSub;
+ VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1);
+ assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine );
+ assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr );
+ sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1);
}
}
}
@@ -153932,7 +156923,7 @@ static int codeAllEqualityTerms(
/* Figure out how many memory cells we will need then allocate them.
*/
regBase = pParse->nMem + 1;
- nReg = pLoop->u.btree.nEq + nExtraReg;
+ nReg = nEq + nExtraReg;
pParse->nMem += nReg;
zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));
@@ -153979,9 +156970,6 @@ static int codeAllEqualityTerms(
sqlite3VdbeAddOp2(v, OP_Copy, r1, regBase+j);
}
}
- }
- for(j=nSkip; j<nEq; j++){
- pTerm = pLoop->aLTerm[j];
if( pTerm->eOperator & WO_IN ){
if( pTerm->pExpr->flags & EP_xIsSelect ){
/* No affinity ever needs to be (or should be) applied to a value
@@ -154124,7 +157112,7 @@ static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){
** 2) transform the expression node to a TK_REGISTER node that reads
** from the newly populated register.
**
-** Also, if the node is a TK_COLUMN that does access the table idenified
+** Also, if the node is a TK_COLUMN that does access the table identified
** by pCCurHint.iTabCur, and an index is being used (which we will
** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into
** an access of the index rather than the original table.
@@ -154742,7 +157730,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
};
assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */
assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */
- assert( TK_GE==TK_GT+3 ); /* ... is correcct. */
+ assert( TK_GE==TK_GT+3 ); /* ... is correct. */
assert( (pStart->wtFlags & TERM_VNULL)==0 );
testcase( pStart->wtFlags & TERM_VIRTUAL );
@@ -155922,7 +158910,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop(
** the WHERE clause of SQL statements.
**
** This file was originally part of where.c but was split out to improve
-** readability and editabiliity. This file contains utility routines for
+** readability and editability. This file contains utility routines for
** analyzing Expr objects in the WHERE clause.
*/
/* #include "sqliteInt.h" */
@@ -156138,7 +159126,7 @@ static int isLikeOrGlob(
** range search. The third is because the caller assumes that the pattern
** consists of at least one character after all escapes have been
** removed. */
- if( cnt!=0 && 255!=(u8)z[cnt-1] && (cnt>1 || z[0]!=wc[3]) ){
+ if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){
Expr *pPrefix;
/* A "complete" match if the pattern ends with "*" or "%" */
@@ -156711,7 +159699,7 @@ static void exprAnalyzeOrTerm(
pOrTerm->leftCursor))==0 ){
/* This term must be of the form t1.a==t2.b where t2 is in the
** chngToIN set but t1 is not. This term will be either preceded
- ** or follwed by an inverted copy (t2.b==t1.a). Skip this term
+ ** or followed by an inverted copy (t2.b==t1.a). Skip this term
** and use its inversion. */
testcase( pOrTerm->wtFlags & TERM_COPIED );
testcase( pOrTerm->wtFlags & TERM_VIRTUAL );
@@ -156973,8 +159961,8 @@ static void exprAnalyze(
WhereTerm *pTerm; /* The term to be analyzed */
WhereMaskSet *pMaskSet; /* Set of table index masks */
Expr *pExpr; /* The expression to be analyzed */
- Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */
- Bitmask prereqAll; /* Prerequesites of pExpr */
+ Bitmask prereqLeft; /* Prerequisites of the pExpr->pLeft */
+ Bitmask prereqAll; /* Prerequisites of pExpr */
Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */
Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */
int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */
@@ -158460,12 +161448,22 @@ static void translateColumnToCopy(
for(; iStart<iEnd; iStart++, pOp++){
if( pOp->p1!=iTabCur ) continue;
if( pOp->opcode==OP_Column ){
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE OP_Column to OP_Copy at %d\n", iStart);
+ }
+#endif
pOp->opcode = OP_Copy;
pOp->p1 = pOp->p2 + iRegister;
pOp->p2 = pOp->p3;
pOp->p3 = 0;
pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */
}else if( pOp->opcode==OP_Rowid ){
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("TRANSLATE OP_Rowid to OP_Sequence at %d\n", iStart);
+ }
+#endif
pOp->opcode = OP_Sequence;
pOp->p1 = iAutoidxCur;
#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
@@ -158924,13 +161922,17 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
WhereLoop *pLoop = pLevel->pWLoop; /* The loop being coded */
int iCur; /* Cursor for table getting the filter */
IndexedExpr *saved_pIdxEpr; /* saved copy of Parse.pIdxEpr */
+ IndexedExpr *saved_pIdxPartExpr; /* saved copy of Parse.pIdxPartExpr */
saved_pIdxEpr = pParse->pIdxEpr;
+ saved_pIdxPartExpr = pParse->pIdxPartExpr;
pParse->pIdxEpr = 0;
+ pParse->pIdxPartExpr = 0;
assert( pLoop!=0 );
assert( v!=0 );
assert( pLoop->wsFlags & WHERE_BLOOMFILTER );
+ assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 );
addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
do{
@@ -159020,6 +162022,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
}while( iLevel < pWInfo->nLevel );
sqlite3VdbeJumpHere(v, addrOnce);
pParse->pIdxEpr = saved_pIdxEpr;
+ pParse->pIdxPartExpr = saved_pIdxPartExpr;
}
@@ -159535,7 +162538,7 @@ SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCo
** Value pLoop->nOut is currently set to the estimated number of rows
** visited for scanning (a=? AND b=?). This function reduces that estimate
** by some factor to account for the (c BETWEEN ? AND ?) expression based
-** on the stat4 data for the index. this scan will be peformed multiple
+** on the stat4 data for the index. this scan will be performed multiple
** times (once for each (a,b) combination that matches a=?) is dealt with
** by the caller.
**
@@ -159787,7 +162790,8 @@ static int whereRangeScanEst(
** sample, then assume they are 4x more selective. This brings
** the estimated selectivity more in line with what it would be
** if estimated without the use of STAT4 tables. */
- if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) );
+ if( iLwrIdx==iUprIdx ){ nNew -= 20; }
+ assert( 20==sqlite3LogEst(4) );
}else{
nNew = 10; assert( 10==sqlite3LogEst(2) );
}
@@ -160011,17 +163015,34 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){
#ifdef WHERETRACE_ENABLED
/*
** Print a WhereLoop object for debugging purposes
-*/
-SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){
- WhereInfo *pWInfo = pWC->pWInfo;
- int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
- SrcItem *pItem = pWInfo->pTabList->a + p->iTab;
- Table *pTab = pItem->pTab;
- Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
- sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
- p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
- sqlite3DebugPrintf(" %12s",
- pItem->zAlias ? pItem->zAlias : pTab->zName);
+**
+** Format example:
+**
+** .--- Position in WHERE clause rSetup, rRun, nOut ---.
+** | |
+** | .--- selfMask nTerm ------. |
+** | | | |
+** | | .-- prereq Idx wsFlags----. | |
+** | | | Name | | |
+** | | | __|__ nEq ---. ___|__ | __|__
+** | / \ / \ / \ | / \ / \ / \
+** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31
+*/
+SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){
+ if( pWC ){
+ WhereInfo *pWInfo = pWC->pWInfo;
+ int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
+ SrcItem *pItem = pWInfo->pTabList->a + p->iTab;
+ Table *pTab = pItem->pTab;
+ Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
+ sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
+ p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
+ sqlite3DebugPrintf(" %12s",
+ pItem->zAlias ? pItem->zAlias : pTab->zName);
+ }else{
+ sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d",
+ p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab);
+ }
if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
const char *zName;
if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
@@ -160058,6 +163079,15 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){
}
}
}
+SQLITE_PRIVATE void sqlite3ShowWhereLoop(const WhereLoop *p){
+ if( p ) sqlite3WhereLoopPrint(p, 0);
+}
+SQLITE_PRIVATE void sqlite3ShowWhereLoopList(const WhereLoop *p){
+ while( p ){
+ sqlite3ShowWhereLoop(p);
+ p = p->pNextLoop;
+ }
+}
#endif
/*
@@ -160170,46 +163200,60 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
}
/*
-** Return TRUE if all of the following are true:
+** Return TRUE if X is a proper subset of Y but is of equal or less cost.
+** In other words, return true if all constraints of X are also part of Y
+** and Y has additional constraints that might speed the search that X lacks
+** but the cost of running X is not more than the cost of running Y.
+**
+** In other words, return true if the cost relationwship between X and Y
+** is inverted and needs to be adjusted.
**
-** (1) X has the same or lower cost, or returns the same or fewer rows,
-** than Y.
-** (2) X uses fewer WHERE clause terms than Y
-** (3) Every WHERE clause term used by X is also used by Y
-** (4) X skips at least as many columns as Y
-** (5) If X is a covering index, than Y is too
+** Case 1:
**
-** Conditions (2) and (3) mean that X is a "proper subset" of Y.
-** If X is a proper subset of Y then Y is a better choice and ought
-** to have a lower cost. This routine returns TRUE when that cost
-** relationship is inverted and needs to be adjusted. Constraint (4)
-** was added because if X uses skip-scan less than Y it still might
-** deserve a lower cost even if it is a proper subset of Y. Constraint (5)
-** was added because a covering index probably deserves to have a lower cost
-** than a non-covering index even if it is a proper subset.
+** (1a) X and Y use the same index.
+** (1b) X has fewer == terms than Y
+** (1c) Neither X nor Y use skip-scan
+** (1d) X does not have a a greater cost than Y
+**
+** Case 2:
+**
+** (2a) X has the same or lower cost, or returns the same or fewer rows,
+** than Y.
+** (2b) X uses fewer WHERE clause terms than Y
+** (2c) Every WHERE clause term used by X is also used by Y
+** (2d) X skips at least as many columns as Y
+** (2e) If X is a covering index, than Y is too
*/
static int whereLoopCheaperProperSubset(
const WhereLoop *pX, /* First WhereLoop to compare */
const WhereLoop *pY /* Compare against this WhereLoop */
){
int i, j;
+ if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; /* (1d) and (2a) */
+ assert( (pX->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ assert( (pY->wsFlags & WHERE_VIRTUALTABLE)==0 );
+ if( pX->u.btree.nEq < pY->u.btree.nEq /* (1b) */
+ && pX->u.btree.pIndex==pY->u.btree.pIndex /* (1a) */
+ && pX->nSkip==0 && pY->nSkip==0 /* (1c) */
+ ){
+ return 1; /* Case 1 is true */
+ }
if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){
- return 0; /* X is not a subset of Y */
+ return 0; /* (2b) */
}
- if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0;
- if( pY->nSkip > pX->nSkip ) return 0;
+ if( pY->nSkip > pX->nSkip ) return 0; /* (2d) */
for(i=pX->nLTerm-1; i>=0; i--){
if( pX->aLTerm[i]==0 ) continue;
for(j=pY->nLTerm-1; j>=0; j--){
if( pY->aLTerm[j]==pX->aLTerm[i] ) break;
}
- if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */
+ if( j<0 ) return 0; /* (2c) */
}
if( (pX->wsFlags&WHERE_IDX_ONLY)!=0
&& (pY->wsFlags&WHERE_IDX_ONLY)==0 ){
- return 0; /* Constraint (5) */
+ return 0; /* (2e) */
}
- return 1; /* All conditions meet */
+ return 1; /* Case 2 is true */
}
/*
@@ -160290,7 +163334,7 @@ static WhereLoop **whereLoopFindLesser(
** rSetup. Call this SETUP-INVARIANT */
assert( p->rSetup>=pTemplate->rSetup );
- /* Any loop using an appliation-defined index (or PRIMARY KEY or
+ /* Any loop using an application-defined index (or PRIMARY KEY or
** UNIQUE constraint) with one or more == constraints is better
** than an automatic index. Unless it is a skip-scan. */
if( (p->wsFlags & WHERE_AUTO_INDEX)!=0
@@ -160317,7 +163361,7 @@ static WhereLoop **whereLoopFindLesser(
/* If pTemplate is always better than p, then cause p to be overwritten
** with pTemplate. pTemplate is better than p if:
- ** (1) pTemplate has no more dependences than p, and
+ ** (1) pTemplate has no more dependencies than p, and
** (2) pTemplate has an equal or lower cost than p.
*/
if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */
@@ -160435,7 +163479,7 @@ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
}else{
/* We will be overwriting WhereLoop p[]. But before we do, first
** go through the rest of the list and delete any other entries besides
- ** p[] that are also supplated by pTemplate */
+ ** p[] that are also supplanted by pTemplate */
WhereLoop **ppTail = &p->pNextLoop;
WhereLoop *pToDel;
while( *ppTail ){
@@ -160635,7 +163679,7 @@ static int whereRangeVectorLen(
}
/*
-** Adjust the cost C by the costMult facter T. This only occurs if
+** Adjust the cost C by the costMult factor T. This only occurs if
** compiled with -DSQLITE_ENABLE_COSTMULT
*/
#ifdef SQLITE_ENABLE_COSTMULT
@@ -160662,7 +163706,7 @@ static int whereLoopAddBtreeIndex(
Index *pProbe, /* An index on pSrc */
LogEst nInMul /* log(Number of iterations due to IN) */
){
- WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */
+ WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyze context */
Parse *pParse = pWInfo->pParse; /* Parsing context */
sqlite3 *db = pParse->db; /* Database connection malloc context */
WhereLoop *pNew; /* Template WhereLoop under construction */
@@ -160699,7 +163743,10 @@ static int whereLoopAddBtreeIndex(
assert( pNew->u.btree.nBtm==0 );
opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
}
- if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+ if( pProbe->bUnordered || pProbe->bLowQual ){
+ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
+ if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS);
+ }
assert( pNew->u.btree.nEq<pProbe->nColumn );
assert( pNew->u.btree.nEq<pProbe->nKeyCol
@@ -160972,7 +164019,7 @@ static int whereLoopAddBtreeIndex(
assert( pSrc->pTab->szTabRow>0 );
if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){
/* The pProbe->szIdxRow is low for an IPK table since the interior
- ** pages are small. Thuse szIdxRow gives a good estimate of seek cost.
+ ** pages are small. Thus szIdxRow gives a good estimate of seek cost.
** But the leaf pages are full-size, so pProbe->szIdxRow would badly
** under-estimate the scanning cost. */
rCostIdx = pNew->nOut + 16;
@@ -161280,6 +164327,100 @@ static SQLITE_NOINLINE u32 whereIsCoveringIndex(
}
/*
+** This is an sqlite3ParserAddCleanup() callback that is invoked to
+** free the Parse->pIdxEpr list when the Parse object is destroyed.
+*/
+static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
+ IndexedExpr **pp = (IndexedExpr**)pObject;
+ while( *pp!=0 ){
+ IndexedExpr *p = *pp;
+ *pp = p->pIENext;
+ sqlite3ExprDelete(db, p->pExpr);
+ sqlite3DbFreeNN(db, p);
+ }
+}
+
+/*
+** This function is called for a partial index - one with a WHERE clause - in
+** two scenarios. In both cases, it determines whether or not the WHERE
+** clause on the index implies that a column of the table may be safely
+** replaced by a constant expression. For example, in the following
+** SELECT:
+**
+** CREATE INDEX i1 ON t1(b, c) WHERE a=<expr>;
+** SELECT a, b, c FROM t1 WHERE a=<expr> AND b=?;
+**
+** The "a" in the select-list may be replaced by <expr>, iff:
+**
+** (a) <expr> is a constant expression, and
+** (b) The (a=<expr>) comparison uses the BINARY collation sequence, and
+** (c) Column "a" has an affinity other than NONE or BLOB.
+**
+** If argument pItem is NULL, then pMask must not be NULL. In this case this
+** function is being called as part of determining whether or not pIdx
+** is a covering index. This function clears any bits in (*pMask)
+** corresponding to columns that may be replaced by constants as described
+** above.
+**
+** Otherwise, if pItem is not NULL, then this function is being called
+** as part of coding a loop that uses index pIdx. In this case, add entries
+** to the Parse.pIdxPartExpr list for each column that can be replaced
+** by a constant.
+*/
+static void wherePartIdxExpr(
+ Parse *pParse, /* Parse context */
+ Index *pIdx, /* Partial index being processed */
+ Expr *pPart, /* WHERE clause being processed */
+ Bitmask *pMask, /* Mask to clear bits in */
+ int iIdxCur, /* Cursor number for index */
+ SrcItem *pItem /* The FROM clause entry for the table */
+){
+ assert( pItem==0 || (pItem->fg.jointype & JT_RIGHT)==0 );
+ assert( (pItem==0 || pMask==0) && (pMask!=0 || pItem!=0) );
+
+ if( pPart->op==TK_AND ){
+ wherePartIdxExpr(pParse, pIdx, pPart->pRight, pMask, iIdxCur, pItem);
+ pPart = pPart->pLeft;
+ }
+
+ if( (pPart->op==TK_EQ || pPart->op==TK_IS) ){
+ Expr *pLeft = pPart->pLeft;
+ Expr *pRight = pPart->pRight;
+ u8 aff;
+
+ if( pLeft->op!=TK_COLUMN ) return;
+ if( !sqlite3ExprIsConstant(pRight) ) return;
+ if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return;
+ if( pLeft->iColumn<0 ) return;
+ aff = pIdx->pTable->aCol[pLeft->iColumn].affinity;
+ if( aff>=SQLITE_AFF_TEXT ){
+ if( pItem ){
+ sqlite3 *db = pParse->db;
+ IndexedExpr *p = (IndexedExpr*)sqlite3DbMallocRaw(db, sizeof(*p));
+ if( p ){
+ int bNullRow = (pItem->fg.jointype&(JT_LEFT|JT_LTORJ))!=0;
+ p->pExpr = sqlite3ExprDup(db, pRight, 0);
+ p->iDataCur = pItem->iCursor;
+ p->iIdxCur = iIdxCur;
+ p->iIdxCol = pLeft->iColumn;
+ p->bMaybeNullRow = bNullRow;
+ p->pIENext = pParse->pIdxPartExpr;
+ p->aff = aff;
+ pParse->pIdxPartExpr = p;
+ if( p->pIENext==0 ){
+ void *pArg = (void*)&pParse->pIdxPartExpr;
+ sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg);
+ }
+ }
+ }else if( pLeft->iColumn<(BMS-1) ){
+ *pMask &= ~((Bitmask)1 << pLeft->iColumn);
+ }
+ }
+ }
+}
+
+
+/*
** Add all WhereLoop objects for a single table of the join where the table
** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
** a b-tree table, not a virtual table.
@@ -161317,7 +164458,7 @@ static SQLITE_NOINLINE u32 whereIsCoveringIndex(
*/
static int whereLoopAddBtree(
WhereLoopBuilder *pBuilder, /* WHERE clause information */
- Bitmask mPrereq /* Extra prerequesites for using this table */
+ Bitmask mPrereq /* Extra prerequisites for using this table */
){
WhereInfo *pWInfo; /* WHERE analysis context */
Index *pProbe; /* An index we are evaluating */
@@ -161482,9 +164623,6 @@ static int whereLoopAddBtree(
#else
pNew->rRun = rSize + 16;
#endif
- if( IsView(pTab) || (pTab->tabFlags & TF_Ephemeral)!=0 ){
- pNew->wsFlags |= WHERE_VIEWSCAN;
- }
ApplyCostMultiplier(pNew->rRun, pTab->costMult);
whereLoopOutputAdjust(pWC, pNew, rSize);
rc = whereLoopInsert(pBuilder, pNew);
@@ -161497,6 +164635,11 @@ static int whereLoopAddBtree(
pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
}else{
m = pSrc->colUsed & pProbe->colNotIdxed;
+ if( pProbe->pPartIdxWhere ){
+ wherePartIdxExpr(
+ pWInfo->pParse, pProbe, pProbe->pPartIdxWhere, &m, 0, 0
+ );
+ }
pNew->wsFlags = WHERE_INDEXED;
if( m==TOPBIT || (pProbe->bHasExpr && !pProbe->bHasVCol && m!=0) ){
u32 isCov = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor);
@@ -161824,7 +164967,7 @@ static int whereLoopAddVirtualOne(
**
** Return a pointer to the collation name:
**
-** 1. If there is an explicit COLLATE operator on the constaint, return it.
+** 1. If there is an explicit COLLATE operator on the constraint, return it.
**
** 2. Else, if the column has an alternative collation, return that.
**
@@ -161879,7 +165022,7 @@ SQLITE_API int sqlite3_vtab_rhs_value(
sqlite3_value *pVal = 0;
int rc = SQLITE_OK;
if( iCons<0 || iCons>=pIdxInfo->nConstraint ){
- rc = SQLITE_MISUSE; /* EV: R-30545-25046 */
+ rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */
}else{
if( pH->aRhs[iCons]==0 ){
WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset];
@@ -162785,7 +165928,8 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
** For joins of 3 or more tables, track the 10 best paths */
mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10);
assert( nLoop<=pWInfo->pTabList->nSrc );
- WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst));
+ WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n",
+ nRowEst, pParse->nQueryLoop));
/* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this
** case the purpose of this call is to estimate the number of rows returned
@@ -162888,7 +166032,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
);
}
/* TUNING: Add a small extra penalty (3) to sorting as an
- ** extra encouragment to the query planner to select a plan
+ ** extra encouragement to the query planner to select a plan
** where the rows emerge in the correct order without any sorting
** required. */
rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3;
@@ -162902,13 +166046,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */
}
- /* TUNING: A full-scan of a VIEW or subquery in the outer loop
- ** is not so bad. */
- if( iLoop==0 && (pWLoop->wsFlags & WHERE_VIEWSCAN)!=0 ){
- rCost += -10;
- nOut += -30;
- }
-
/* Check to see if pWLoop should be added to the set of
** mxChoice best-so-far paths.
**
@@ -163459,20 +166596,6 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
}
/*
-** This is an sqlite3ParserAddCleanup() callback that is invoked to
-** free the Parse->pIdxEpr list when the Parse object is destroyed.
-*/
-static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
- Parse *pParse = (Parse*)pObject;
- while( pParse->pIdxEpr!=0 ){
- IndexedExpr *p = pParse->pIdxEpr;
- pParse->pIdxEpr = p->pIENext;
- sqlite3ExprDelete(db, p->pExpr);
- sqlite3DbFreeNN(db, p);
- }
-}
-
-/*
** The index pIdx is used by a query and contains one or more expressions.
** In other words pIdx is an index on an expression. iIdxCur is the cursor
** number for the index and iDataCur is the cursor number for the corresponding
@@ -163511,6 +166634,20 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
continue;
}
if( sqlite3ExprIsConstant(pExpr) ) continue;
+ if( pExpr->op==TK_FUNCTION ){
+ /* Functions that might set a subtype should not be replaced by the
+ ** value taken from an expression index since the index omits the
+ ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */
+ int n;
+ FuncDef *pDef;
+ sqlite3 *db = pParse->db;
+ assert( ExprUseXList(pExpr) );
+ n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0;
+ pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0);
+ if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){
+ continue;
+ }
+ }
p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr));
if( p==0 ) break;
p->pIENext = pParse->pIdxEpr;
@@ -163533,7 +166670,30 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
#endif
pParse->pIdxEpr = p;
if( p->pIENext==0 ){
- sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pParse);
+ void *pArg = (void*)&pParse->pIdxEpr;
+ sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg);
+ }
+ }
+}
+
+/*
+** Set the reverse-scan order mask to one for all tables in the query
+** with the exception of MATERIALIZED common table expressions that have
+** their own internal ORDER BY clauses.
+**
+** This implements the PRAGMA reverse_unordered_selects=ON setting.
+** (Also SQLITE_DBCONFIG_REVERSE_SCANORDER).
+*/
+static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){
+ int ii;
+ for(ii=0; ii<pWInfo->pTabList->nSrc; ii++){
+ SrcItem *pItem = &pWInfo->pTabList->a[ii];
+ if( !pItem->fg.isCte
+ || pItem->u2.pCteUse->eM10d!=M10d_Yes
+ || NEVER(pItem->pSelect==0)
+ || pItem->pSelect->pOrderBy==0
+ ){
+ pWInfo->revMask |= MASKBIT(ii);
}
}
}
@@ -163596,7 +166756,7 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
**
** OUTER JOINS
**
-** An outer join of tables t1 and t2 is conceptally coded as follows:
+** An outer join of tables t1 and t2 is conceptually coded as follows:
**
** foreach row1 in t1 do
** flag = 0
@@ -163666,7 +166826,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */
testcase( pOrderBy && pOrderBy->nExpr==BMS-1 );
- if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0;
+ if( pOrderBy && pOrderBy->nExpr>=BMS ){
+ pOrderBy = 0;
+ wctrlFlags &= ~WHERE_WANT_DISTINCT;
+ }
/* The number of tables in the FROM clause is limited by the number of
** bits in a Bitmask
@@ -163691,7 +166854,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
** field (type Bitmask) it must be aligned on an 8-byte boundary on
** some architectures. Hence the ROUND8() below.
*/
- nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
+ nByteWInfo = ROUND8P(sizeof(WhereInfo));
+ if( nTabList>1 ){
+ nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel));
+ }
pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
if( db->mallocFailed ){
sqlite3DbFree(db, pWInfo);
@@ -163751,7 +166917,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
**
** The N-th term of the FROM clause is assigned a bitmask of 1<<N.
**
- ** The rule of the previous sentence ensures thta if X is the bitmask for
+ ** The rule of the previous sentence ensures that if X is the bitmask for
** a table T, then X-1 is the bitmask for all other tables to the left of T.
** Knowing the bitmask for all tables to the left of a left join is
** important. Ticket #3015.
@@ -163901,9 +167067,20 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
wherePathSolver(pWInfo, pWInfo->nRowOut+1);
if( db->mallocFailed ) goto whereBeginError;
}
+
+ /* TUNING: Assume that a DISTINCT clause on a subquery reduces
+ ** the output size by a factor of 8 (LogEst -30).
+ */
+ if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){
+ WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n",
+ pWInfo->nRowOut, pWInfo->nRowOut-30));
+ pWInfo->nRowOut -= 30;
+ }
+
}
+ assert( pWInfo->pTabList!=0 );
if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){
- pWInfo->revMask = ALLBITS;
+ whereReverseScanOrder(pWInfo);
}
if( pParse->nErr ){
goto whereBeginError;
@@ -164003,6 +167180,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW)
&& !IsVirtual(pTabList->a[0].pTab)
&& (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK))
+ && OptimizationEnabled(db, SQLITE_OnePass)
)){
pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI;
if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){
@@ -164111,6 +167289,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
if( pIx->bHasExpr && OptimizationEnabled(db, SQLITE_IndexedExpr) ){
whereAddIndexedExpr(pParse, pIx, iIndexCur, pTabItem);
}
+ if( pIx->pPartIdxWhere && (pTabItem->fg.jointype & JT_RIGHT)==0 ){
+ wherePartIdxExpr(
+ pParse, pIx, pIx->pPartIdxWhere, 0, iIndexCur, pTabItem
+ );
+ }
}
pLevel->iIdxCur = iIndexCur;
assert( pIx!=0 );
@@ -164236,6 +167419,11 @@ whereBeginError:
pParse->nQueryLoop = pWInfo->savedNQueryLoop;
whereInfoFree(db, pWInfo);
}
+#ifdef WHERETRACE_ENABLED
+ /* Prevent harmless compiler warnings about debugging routines
+ ** being declared but never used */
+ sqlite3ShowWhereLoopList(0);
+#endif /* WHERETRACE_ENABLED */
return 0;
}
@@ -164732,7 +167920,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
**
** These are the same built-in window functions supported by Postgres.
** Although the behaviour of aggregate window functions (functions that
-** can be used as either aggregates or window funtions) allows them to
+** can be used as either aggregates or window functions) allows them to
** be implemented using an API, built-in window functions are much more
** esoteric. Additionally, some window functions (e.g. nth_value())
** may only be implemented by caching the entire partition in memory.
@@ -165262,7 +168450,7 @@ static Window *windowFind(Parse *pParse, Window *pList, const char *zName){
** is the Window object representing the associated OVER clause. This
** function updates the contents of pWin as follows:
**
-** * If the OVER clause refered to a named window (as in "max(x) OVER win"),
+** * If the OVER clause referred to a named window (as in "max(x) OVER win"),
** search list pList for a matching WINDOW definition, and update pWin
** accordingly. If no such WINDOW clause can be found, leave an error
** in pParse.
@@ -165653,7 +168841,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
assert( ExprUseXList(pWin->pOwner) );
assert( pWin->pWFunc!=0 );
pArgs = pWin->pOwner->x.pList;
- if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){
+ if( pWin->pWFunc->funcFlags & SQLITE_SUBTYPE ){
selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist);
pWin->iArgCol = (pSublist ? pSublist->nExpr : 0);
pWin->bExprArgs = 1;
@@ -165883,7 +169071,7 @@ SQLITE_PRIVATE Window *sqlite3WindowAssemble(
}
/*
-** Window *pWin has just been created from a WINDOW clause. Tokne pBase
+** Window *pWin has just been created from a WINDOW clause. Token pBase
** is the base window. Earlier windows from the same WINDOW clause are
** stored in the linked list starting at pWin->pNextWin. This function
** either updates *pWin according to the base specification, or else
@@ -165927,8 +169115,9 @@ SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){
if( p ){
assert( p->op==TK_FUNCTION );
assert( pWin );
+ assert( ExprIsFullSize(p) );
p->y.pWin = pWin;
- ExprSetProperty(p, EP_WinFunc);
+ ExprSetProperty(p, EP_WinFunc|EP_FullSize);
pWin->pOwner = p;
if( (p->flags & EP_Distinct) && pWin->eFrmType!=TK_FILTER ){
sqlite3ErrorMsg(pParse,
@@ -166189,7 +169378,7 @@ struct WindowCsrAndReg {
**
** (ORDER BY a, b GROUPS BETWEEN 2 PRECEDING AND 2 FOLLOWING)
**
-** The windows functions implmentation caches the input rows in a temp
+** The windows functions implementation caches the input rows in a temp
** table, sorted by "a, b" (it actually populates the cache lazily, and
** aggressively removes rows once they are no longer required, but that's
** a mere detail). It keeps three cursors open on the temp table. One
@@ -167198,7 +170387,7 @@ static int windowExprGtZero(Parse *pParse, Expr *pExpr){
**
** For the most part, the patterns above are adapted to support UNBOUNDED by
** assuming that it is equivalent to "infinity PRECEDING/FOLLOWING" and
-** CURRENT ROW by assuming that it is equivilent to "0 PRECEDING/FOLLOWING".
+** CURRENT ROW by assuming that it is equivalent to "0 PRECEDING/FOLLOWING".
** This is optimized of course - branches that will never be taken and
** conditions that are always true are omitted from the VM code. The only
** exceptional case is:
@@ -167477,7 +170666,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
}
/* Allocate registers for the array of values from the sub-query, the
- ** samve values in record form, and the rowid used to insert said record
+ ** same values in record form, and the rowid used to insert said record
** into the ephemeral table. */
regNew = pParse->nMem+1;
pParse->nMem += nInput;
@@ -167718,7 +170907,8 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
/************** End of window.c **********************************************/
/************** Begin file parse.c *******************************************/
/* This file is automatically generated by Lemon from input grammar
-** source file "parse.y". */
+** source file "parse.y".
+*/
/*
** 2001-09-15
**
@@ -167735,7 +170925,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
** The canonical source code to this file ("parse.y") is a Lemon grammar
** file that specifies the input grammar and actions to take while parsing.
** That input file is processed by Lemon to generate a C-language
-** implementation of a parser for the given grammer. You might be reading
+** implementation of a parser for the given grammar. You might be reading
** this comment as part of the translated C-code. Edits should be made
** to the original parse.y sources.
*/
@@ -168229,18 +171419,18 @@ typedef union {
#define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse;
#define sqlite3ParserCTX_STORE yypParser->pParse=pParse;
#define YYFALLBACK 1
-#define YYNSTATE 575
-#define YYNRULE 403
+#define YYNSTATE 579
+#define YYNRULE 405
#define YYNRULE_WITH_ACTION 340
#define YYNTOKEN 185
-#define YY_MAX_SHIFT 574
-#define YY_MIN_SHIFTREDUCE 833
-#define YY_MAX_SHIFTREDUCE 1235
-#define YY_ERROR_ACTION 1236
-#define YY_ACCEPT_ACTION 1237
-#define YY_NO_ACTION 1238
-#define YY_MIN_REDUCE 1239
-#define YY_MAX_REDUCE 1641
+#define YY_MAX_SHIFT 578
+#define YY_MIN_SHIFTREDUCE 838
+#define YY_MAX_SHIFTREDUCE 1242
+#define YY_ERROR_ACTION 1243
+#define YY_ACCEPT_ACTION 1244
+#define YY_NO_ACTION 1245
+#define YY_MIN_REDUCE 1246
+#define YY_MAX_REDUCE 1650
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -168307,218 +171497,218 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2096)
+#define YY_ACTTAB_COUNT (2100)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 568, 208, 568, 118, 115, 229, 568, 118, 115, 229,
- /* 10 */ 568, 1310, 377, 1289, 408, 562, 562, 562, 568, 409,
- /* 20 */ 378, 1310, 1272, 41, 41, 41, 41, 208, 1520, 71,
- /* 30 */ 71, 969, 419, 41, 41, 491, 303, 279, 303, 970,
- /* 40 */ 397, 71, 71, 125, 126, 80, 1212, 1212, 1047, 1050,
- /* 50 */ 1037, 1037, 123, 123, 124, 124, 124, 124, 476, 409,
- /* 60 */ 1237, 1, 1, 574, 2, 1241, 550, 118, 115, 229,
- /* 70 */ 317, 480, 146, 480, 524, 118, 115, 229, 529, 1323,
- /* 80 */ 417, 523, 142, 125, 126, 80, 1212, 1212, 1047, 1050,
- /* 90 */ 1037, 1037, 123, 123, 124, 124, 124, 124, 118, 115,
- /* 100 */ 229, 327, 122, 122, 122, 122, 121, 121, 120, 120,
- /* 110 */ 120, 119, 116, 444, 284, 284, 284, 284, 442, 442,
- /* 120 */ 442, 1561, 376, 1563, 1188, 375, 1159, 565, 1159, 565,
- /* 130 */ 409, 1561, 537, 259, 226, 444, 101, 145, 449, 316,
- /* 140 */ 559, 240, 122, 122, 122, 122, 121, 121, 120, 120,
- /* 150 */ 120, 119, 116, 444, 125, 126, 80, 1212, 1212, 1047,
- /* 160 */ 1050, 1037, 1037, 123, 123, 124, 124, 124, 124, 142,
- /* 170 */ 294, 1188, 339, 448, 120, 120, 120, 119, 116, 444,
- /* 180 */ 127, 1188, 1189, 1188, 148, 441, 440, 568, 119, 116,
- /* 190 */ 444, 124, 124, 124, 124, 117, 122, 122, 122, 122,
- /* 200 */ 121, 121, 120, 120, 120, 119, 116, 444, 454, 113,
- /* 210 */ 13, 13, 546, 122, 122, 122, 122, 121, 121, 120,
- /* 220 */ 120, 120, 119, 116, 444, 422, 316, 559, 1188, 1189,
- /* 230 */ 1188, 149, 1220, 409, 1220, 124, 124, 124, 124, 122,
- /* 240 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116,
- /* 250 */ 444, 465, 342, 1034, 1034, 1048, 1051, 125, 126, 80,
- /* 260 */ 1212, 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124,
- /* 270 */ 124, 124, 1275, 522, 222, 1188, 568, 409, 224, 514,
- /* 280 */ 175, 82, 83, 122, 122, 122, 122, 121, 121, 120,
- /* 290 */ 120, 120, 119, 116, 444, 1005, 16, 16, 1188, 133,
- /* 300 */ 133, 125, 126, 80, 1212, 1212, 1047, 1050, 1037, 1037,
- /* 310 */ 123, 123, 124, 124, 124, 124, 122, 122, 122, 122,
- /* 320 */ 121, 121, 120, 120, 120, 119, 116, 444, 1038, 546,
- /* 330 */ 1188, 373, 1188, 1189, 1188, 252, 1429, 399, 504, 501,
- /* 340 */ 500, 111, 560, 566, 4, 924, 924, 433, 499, 340,
- /* 350 */ 460, 328, 360, 394, 1233, 1188, 1189, 1188, 563, 568,
- /* 360 */ 122, 122, 122, 122, 121, 121, 120, 120, 120, 119,
- /* 370 */ 116, 444, 284, 284, 369, 1574, 1600, 441, 440, 154,
- /* 380 */ 409, 445, 71, 71, 1282, 565, 1217, 1188, 1189, 1188,
- /* 390 */ 85, 1219, 271, 557, 543, 515, 1555, 568, 98, 1218,
- /* 400 */ 6, 1274, 472, 142, 125, 126, 80, 1212, 1212, 1047,
- /* 410 */ 1050, 1037, 1037, 123, 123, 124, 124, 124, 124, 550,
- /* 420 */ 13, 13, 1024, 507, 1220, 1188, 1220, 549, 109, 109,
- /* 430 */ 222, 568, 1234, 175, 568, 427, 110, 197, 445, 569,
- /* 440 */ 445, 430, 1546, 1014, 325, 551, 1188, 270, 287, 368,
- /* 450 */ 510, 363, 509, 257, 71, 71, 543, 71, 71, 359,
- /* 460 */ 316, 559, 1606, 122, 122, 122, 122, 121, 121, 120,
- /* 470 */ 120, 120, 119, 116, 444, 1014, 1014, 1016, 1017, 27,
- /* 480 */ 284, 284, 1188, 1189, 1188, 1154, 568, 1605, 409, 899,
- /* 490 */ 190, 550, 356, 565, 550, 935, 533, 517, 1154, 516,
- /* 500 */ 413, 1154, 552, 1188, 1189, 1188, 568, 544, 1548, 51,
- /* 510 */ 51, 214, 125, 126, 80, 1212, 1212, 1047, 1050, 1037,
- /* 520 */ 1037, 123, 123, 124, 124, 124, 124, 1188, 474, 135,
- /* 530 */ 135, 409, 284, 284, 1484, 505, 121, 121, 120, 120,
- /* 540 */ 120, 119, 116, 444, 1005, 565, 518, 217, 541, 1555,
- /* 550 */ 316, 559, 142, 6, 532, 125, 126, 80, 1212, 1212,
- /* 560 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 570 */ 1549, 122, 122, 122, 122, 121, 121, 120, 120, 120,
- /* 580 */ 119, 116, 444, 485, 1188, 1189, 1188, 482, 281, 1263,
- /* 590 */ 955, 252, 1188, 373, 504, 501, 500, 1188, 340, 570,
- /* 600 */ 1188, 570, 409, 292, 499, 955, 874, 191, 480, 316,
- /* 610 */ 559, 384, 290, 380, 122, 122, 122, 122, 121, 121,
- /* 620 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 630 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 640 */ 124, 409, 394, 1132, 1188, 867, 100, 284, 284, 1188,
- /* 650 */ 1189, 1188, 373, 1089, 1188, 1189, 1188, 1188, 1189, 1188,
- /* 660 */ 565, 455, 32, 373, 233, 125, 126, 80, 1212, 1212,
- /* 670 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 680 */ 1428, 957, 568, 228, 956, 122, 122, 122, 122, 121,
- /* 690 */ 121, 120, 120, 120, 119, 116, 444, 1154, 228, 1188,
- /* 700 */ 157, 1188, 1189, 1188, 1547, 13, 13, 301, 955, 1228,
- /* 710 */ 1154, 153, 409, 1154, 373, 1577, 1172, 5, 369, 1574,
- /* 720 */ 429, 1234, 3, 955, 122, 122, 122, 122, 121, 121,
- /* 730 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 740 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 750 */ 124, 409, 208, 567, 1188, 1025, 1188, 1189, 1188, 1188,
- /* 760 */ 388, 850, 155, 1546, 286, 402, 1094, 1094, 488, 568,
- /* 770 */ 465, 342, 1315, 1315, 1546, 125, 126, 80, 1212, 1212,
- /* 780 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 790 */ 129, 568, 13, 13, 374, 122, 122, 122, 122, 121,
- /* 800 */ 121, 120, 120, 120, 119, 116, 444, 302, 568, 453,
- /* 810 */ 528, 1188, 1189, 1188, 13, 13, 1188, 1189, 1188, 1293,
- /* 820 */ 463, 1263, 409, 1313, 1313, 1546, 1010, 453, 452, 200,
- /* 830 */ 299, 71, 71, 1261, 122, 122, 122, 122, 121, 121,
- /* 840 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 850 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 860 */ 124, 409, 227, 1069, 1154, 284, 284, 419, 312, 278,
- /* 870 */ 278, 285, 285, 1415, 406, 405, 382, 1154, 565, 568,
- /* 880 */ 1154, 1191, 565, 1594, 565, 125, 126, 80, 1212, 1212,
- /* 890 */ 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124, 124,
- /* 900 */ 453, 1476, 13, 13, 1530, 122, 122, 122, 122, 121,
- /* 910 */ 121, 120, 120, 120, 119, 116, 444, 201, 568, 354,
- /* 920 */ 1580, 574, 2, 1241, 838, 839, 840, 1556, 317, 1207,
- /* 930 */ 146, 6, 409, 255, 254, 253, 206, 1323, 9, 1191,
- /* 940 */ 262, 71, 71, 424, 122, 122, 122, 122, 121, 121,
- /* 950 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1212,
- /* 960 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 970 */ 124, 568, 284, 284, 568, 1208, 409, 573, 313, 1241,
- /* 980 */ 349, 1292, 352, 419, 317, 565, 146, 491, 525, 1637,
- /* 990 */ 395, 371, 491, 1323, 70, 70, 1291, 71, 71, 240,
- /* 1000 */ 1321, 104, 80, 1212, 1212, 1047, 1050, 1037, 1037, 123,
- /* 1010 */ 123, 124, 124, 124, 124, 122, 122, 122, 122, 121,
- /* 1020 */ 121, 120, 120, 120, 119, 116, 444, 1110, 284, 284,
- /* 1030 */ 428, 448, 1519, 1208, 439, 284, 284, 1483, 1348, 311,
- /* 1040 */ 474, 565, 1111, 969, 491, 491, 217, 1259, 565, 1532,
- /* 1050 */ 568, 970, 207, 568, 1024, 240, 383, 1112, 519, 122,
- /* 1060 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116,
- /* 1070 */ 444, 1015, 107, 71, 71, 1014, 13, 13, 910, 568,
- /* 1080 */ 1489, 568, 284, 284, 97, 526, 491, 448, 911, 1322,
- /* 1090 */ 1318, 545, 409, 284, 284, 565, 151, 209, 1489, 1491,
- /* 1100 */ 262, 450, 55, 55, 56, 56, 565, 1014, 1014, 1016,
- /* 1110 */ 443, 332, 409, 527, 12, 295, 125, 126, 80, 1212,
- /* 1120 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 1130 */ 124, 347, 409, 862, 1528, 1208, 125, 126, 80, 1212,
- /* 1140 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 1150 */ 124, 1133, 1635, 474, 1635, 371, 125, 114, 80, 1212,
- /* 1160 */ 1212, 1047, 1050, 1037, 1037, 123, 123, 124, 124, 124,
- /* 1170 */ 124, 1489, 329, 474, 331, 122, 122, 122, 122, 121,
- /* 1180 */ 121, 120, 120, 120, 119, 116, 444, 203, 1415, 568,
- /* 1190 */ 1290, 862, 464, 1208, 436, 122, 122, 122, 122, 121,
- /* 1200 */ 121, 120, 120, 120, 119, 116, 444, 553, 1133, 1636,
- /* 1210 */ 539, 1636, 15, 15, 890, 122, 122, 122, 122, 121,
- /* 1220 */ 121, 120, 120, 120, 119, 116, 444, 568, 298, 538,
- /* 1230 */ 1131, 1415, 1553, 1554, 1327, 409, 6, 6, 1165, 1264,
- /* 1240 */ 415, 320, 284, 284, 1415, 508, 565, 525, 300, 457,
- /* 1250 */ 43, 43, 568, 891, 12, 565, 330, 478, 425, 407,
- /* 1260 */ 126, 80, 1212, 1212, 1047, 1050, 1037, 1037, 123, 123,
- /* 1270 */ 124, 124, 124, 124, 568, 57, 57, 288, 1188, 1415,
- /* 1280 */ 496, 458, 392, 392, 391, 273, 389, 1131, 1552, 847,
- /* 1290 */ 1165, 407, 6, 568, 321, 1154, 470, 44, 44, 1551,
- /* 1300 */ 1110, 426, 234, 6, 323, 256, 540, 256, 1154, 431,
- /* 1310 */ 568, 1154, 322, 17, 487, 1111, 58, 58, 122, 122,
- /* 1320 */ 122, 122, 121, 121, 120, 120, 120, 119, 116, 444,
- /* 1330 */ 1112, 216, 481, 59, 59, 1188, 1189, 1188, 111, 560,
- /* 1340 */ 324, 4, 236, 456, 526, 568, 237, 456, 568, 437,
- /* 1350 */ 168, 556, 420, 141, 479, 563, 568, 293, 568, 1091,
- /* 1360 */ 568, 293, 568, 1091, 531, 568, 870, 8, 60, 60,
- /* 1370 */ 235, 61, 61, 568, 414, 568, 414, 568, 445, 62,
- /* 1380 */ 62, 45, 45, 46, 46, 47, 47, 199, 49, 49,
- /* 1390 */ 557, 568, 359, 568, 100, 486, 50, 50, 63, 63,
- /* 1400 */ 64, 64, 561, 415, 535, 410, 568, 1024, 568, 534,
- /* 1410 */ 316, 559, 316, 559, 65, 65, 14, 14, 568, 1024,
- /* 1420 */ 568, 512, 930, 870, 1015, 109, 109, 929, 1014, 66,
- /* 1430 */ 66, 131, 131, 110, 451, 445, 569, 445, 416, 177,
- /* 1440 */ 1014, 132, 132, 67, 67, 568, 467, 568, 930, 471,
- /* 1450 */ 1360, 283, 226, 929, 315, 1359, 407, 568, 459, 407,
- /* 1460 */ 1014, 1014, 1016, 239, 407, 86, 213, 1346, 52, 52,
- /* 1470 */ 68, 68, 1014, 1014, 1016, 1017, 27, 1579, 1176, 447,
- /* 1480 */ 69, 69, 288, 97, 108, 1535, 106, 392, 392, 391,
- /* 1490 */ 273, 389, 568, 877, 847, 881, 568, 111, 560, 466,
- /* 1500 */ 4, 568, 152, 30, 38, 568, 1128, 234, 396, 323,
- /* 1510 */ 111, 560, 527, 4, 563, 53, 53, 322, 568, 163,
- /* 1520 */ 163, 568, 337, 468, 164, 164, 333, 563, 76, 76,
- /* 1530 */ 568, 289, 1508, 568, 31, 1507, 568, 445, 338, 483,
- /* 1540 */ 100, 54, 54, 344, 72, 72, 296, 236, 1076, 557,
- /* 1550 */ 445, 877, 1356, 134, 134, 168, 73, 73, 141, 161,
- /* 1560 */ 161, 1568, 557, 535, 568, 319, 568, 348, 536, 1007,
- /* 1570 */ 473, 261, 261, 889, 888, 235, 535, 568, 1024, 568,
- /* 1580 */ 475, 534, 261, 367, 109, 109, 521, 136, 136, 130,
- /* 1590 */ 130, 1024, 110, 366, 445, 569, 445, 109, 109, 1014,
- /* 1600 */ 162, 162, 156, 156, 568, 110, 1076, 445, 569, 445,
- /* 1610 */ 410, 351, 1014, 568, 353, 316, 559, 568, 343, 568,
- /* 1620 */ 100, 497, 357, 258, 100, 896, 897, 140, 140, 355,
- /* 1630 */ 1306, 1014, 1014, 1016, 1017, 27, 139, 139, 362, 451,
- /* 1640 */ 137, 137, 138, 138, 1014, 1014, 1016, 1017, 27, 1176,
- /* 1650 */ 447, 568, 372, 288, 111, 560, 1018, 4, 392, 392,
- /* 1660 */ 391, 273, 389, 568, 1137, 847, 568, 1072, 568, 258,
- /* 1670 */ 492, 563, 568, 211, 75, 75, 555, 960, 234, 261,
- /* 1680 */ 323, 111, 560, 927, 4, 113, 77, 77, 322, 74,
- /* 1690 */ 74, 42, 42, 1369, 445, 48, 48, 1414, 563, 972,
- /* 1700 */ 973, 1088, 1087, 1088, 1087, 860, 557, 150, 928, 1342,
- /* 1710 */ 113, 1354, 554, 1419, 1018, 1271, 1262, 1250, 236, 1249,
- /* 1720 */ 1251, 445, 1587, 1339, 308, 276, 168, 309, 11, 141,
- /* 1730 */ 393, 310, 232, 557, 1401, 1024, 335, 291, 1396, 219,
- /* 1740 */ 336, 109, 109, 934, 297, 1406, 235, 341, 477, 110,
- /* 1750 */ 502, 445, 569, 445, 1389, 1405, 1014, 400, 1289, 365,
- /* 1760 */ 223, 1480, 1024, 1479, 1351, 1352, 1350, 1349, 109, 109,
- /* 1770 */ 204, 1590, 1228, 558, 265, 218, 110, 205, 445, 569,
- /* 1780 */ 445, 410, 387, 1014, 1527, 179, 316, 559, 1014, 1014,
- /* 1790 */ 1016, 1017, 27, 230, 1525, 1225, 79, 560, 85, 4,
- /* 1800 */ 418, 215, 548, 81, 84, 188, 1402, 173, 181, 461,
- /* 1810 */ 451, 35, 462, 563, 183, 1014, 1014, 1016, 1017, 27,
- /* 1820 */ 184, 1485, 185, 186, 495, 242, 98, 398, 1408, 36,
- /* 1830 */ 1407, 484, 91, 469, 401, 1410, 445, 192, 1474, 246,
- /* 1840 */ 1496, 490, 346, 277, 248, 196, 493, 511, 557, 350,
- /* 1850 */ 1252, 249, 250, 403, 1309, 1308, 111, 560, 432, 4,
- /* 1860 */ 1307, 1300, 93, 1604, 881, 1603, 224, 404, 434, 520,
- /* 1870 */ 263, 435, 1573, 563, 1279, 1278, 364, 1024, 306, 1277,
- /* 1880 */ 264, 1602, 1559, 109, 109, 370, 1299, 307, 1558, 438,
- /* 1890 */ 128, 110, 1374, 445, 569, 445, 445, 546, 1014, 10,
- /* 1900 */ 1461, 105, 381, 1373, 34, 571, 99, 1332, 557, 314,
- /* 1910 */ 1182, 530, 272, 274, 379, 210, 1331, 547, 385, 386,
- /* 1920 */ 275, 572, 1247, 1242, 411, 412, 1512, 165, 178, 1513,
- /* 1930 */ 1014, 1014, 1016, 1017, 27, 1511, 1510, 1024, 78, 147,
- /* 1940 */ 166, 220, 221, 109, 109, 834, 304, 167, 446, 212,
- /* 1950 */ 318, 110, 231, 445, 569, 445, 144, 1086, 1014, 1084,
- /* 1960 */ 326, 180, 169, 1207, 182, 334, 238, 913, 241, 1100,
- /* 1970 */ 187, 170, 171, 421, 87, 88, 423, 189, 89, 90,
- /* 1980 */ 172, 1103, 243, 1099, 244, 158, 18, 245, 345, 247,
- /* 1990 */ 1014, 1014, 1016, 1017, 27, 261, 1092, 193, 1222, 489,
- /* 2000 */ 194, 37, 366, 849, 494, 251, 195, 506, 92, 19,
- /* 2010 */ 498, 358, 20, 503, 879, 361, 94, 892, 305, 159,
- /* 2020 */ 513, 39, 95, 1170, 160, 1053, 964, 1139, 96, 174,
- /* 2030 */ 1138, 225, 280, 282, 198, 958, 113, 1160, 1156, 260,
- /* 2040 */ 21, 22, 23, 1158, 1164, 1163, 1144, 24, 33, 25,
- /* 2050 */ 202, 542, 26, 100, 1067, 102, 1054, 103, 7, 1052,
- /* 2060 */ 1056, 1109, 1057, 1108, 266, 267, 28, 40, 390, 1019,
- /* 2070 */ 861, 112, 29, 564, 1178, 1177, 268, 176, 143, 923,
- /* 2080 */ 1238, 1238, 1238, 1238, 1238, 1238, 1238, 1238, 1238, 1238,
- /* 2090 */ 1238, 1238, 1238, 1238, 269, 1595,
+ /* 0 */ 572, 210, 572, 119, 116, 231, 572, 119, 116, 231,
+ /* 10 */ 572, 1317, 379, 1296, 410, 566, 566, 566, 572, 411,
+ /* 20 */ 380, 1317, 1279, 42, 42, 42, 42, 210, 1529, 72,
+ /* 30 */ 72, 974, 421, 42, 42, 495, 305, 281, 305, 975,
+ /* 40 */ 399, 72, 72, 126, 127, 81, 1217, 1217, 1054, 1057,
+ /* 50 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 480, 411,
+ /* 60 */ 1244, 1, 1, 578, 2, 1248, 554, 119, 116, 231,
+ /* 70 */ 319, 484, 147, 484, 528, 119, 116, 231, 533, 1330,
+ /* 80 */ 419, 527, 143, 126, 127, 81, 1217, 1217, 1054, 1057,
+ /* 90 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 119, 116,
+ /* 100 */ 231, 329, 123, 123, 123, 123, 122, 122, 121, 121,
+ /* 110 */ 121, 120, 117, 448, 286, 286, 286, 286, 446, 446,
+ /* 120 */ 446, 1568, 378, 1570, 1193, 377, 1164, 569, 1164, 569,
+ /* 130 */ 411, 1568, 541, 261, 228, 448, 102, 146, 453, 318,
+ /* 140 */ 563, 242, 123, 123, 123, 123, 122, 122, 121, 121,
+ /* 150 */ 121, 120, 117, 448, 126, 127, 81, 1217, 1217, 1054,
+ /* 160 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 143,
+ /* 170 */ 296, 1193, 341, 452, 121, 121, 121, 120, 117, 448,
+ /* 180 */ 128, 1193, 1194, 1193, 149, 445, 444, 572, 120, 117,
+ /* 190 */ 448, 125, 125, 125, 125, 118, 123, 123, 123, 123,
+ /* 200 */ 122, 122, 121, 121, 121, 120, 117, 448, 458, 114,
+ /* 210 */ 13, 13, 550, 123, 123, 123, 123, 122, 122, 121,
+ /* 220 */ 121, 121, 120, 117, 448, 424, 318, 563, 1193, 1194,
+ /* 230 */ 1193, 150, 1225, 411, 1225, 125, 125, 125, 125, 123,
+ /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117,
+ /* 250 */ 448, 469, 344, 1041, 1041, 1055, 1058, 126, 127, 81,
+ /* 260 */ 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125,
+ /* 270 */ 125, 125, 1282, 526, 224, 1193, 572, 411, 226, 519,
+ /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 122, 121,
+ /* 290 */ 121, 121, 120, 117, 448, 1010, 16, 16, 1193, 134,
+ /* 300 */ 134, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, 1044,
+ /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123,
+ /* 320 */ 122, 122, 121, 121, 121, 120, 117, 448, 1045, 550,
+ /* 330 */ 1193, 375, 1193, 1194, 1193, 254, 1438, 401, 508, 505,
+ /* 340 */ 504, 112, 564, 570, 4, 929, 929, 435, 503, 342,
+ /* 350 */ 464, 330, 362, 396, 1238, 1193, 1194, 1193, 567, 572,
+ /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120,
+ /* 370 */ 117, 448, 286, 286, 371, 1581, 1607, 445, 444, 155,
+ /* 380 */ 411, 449, 72, 72, 1289, 569, 1222, 1193, 1194, 1193,
+ /* 390 */ 86, 1224, 273, 561, 547, 520, 520, 572, 99, 1223,
+ /* 400 */ 6, 1281, 476, 143, 126, 127, 81, 1217, 1217, 1054,
+ /* 410 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 554,
+ /* 420 */ 13, 13, 1031, 511, 1225, 1193, 1225, 553, 110, 110,
+ /* 430 */ 224, 572, 1239, 177, 572, 429, 111, 199, 449, 573,
+ /* 440 */ 449, 432, 1555, 1019, 327, 555, 1193, 272, 289, 370,
+ /* 450 */ 514, 365, 513, 259, 72, 72, 547, 72, 72, 361,
+ /* 460 */ 318, 563, 1613, 123, 123, 123, 123, 122, 122, 121,
+ /* 470 */ 121, 121, 120, 117, 448, 1019, 1019, 1021, 1022, 28,
+ /* 480 */ 286, 286, 1193, 1194, 1193, 1159, 572, 1612, 411, 904,
+ /* 490 */ 192, 554, 358, 569, 554, 940, 537, 521, 1159, 437,
+ /* 500 */ 415, 1159, 556, 1193, 1194, 1193, 572, 548, 548, 52,
+ /* 510 */ 52, 216, 126, 127, 81, 1217, 1217, 1054, 1057, 1044,
+ /* 520 */ 1044, 124, 124, 125, 125, 125, 125, 1193, 478, 136,
+ /* 530 */ 136, 411, 286, 286, 1493, 509, 122, 122, 121, 121,
+ /* 540 */ 121, 120, 117, 448, 1010, 569, 522, 219, 545, 545,
+ /* 550 */ 318, 563, 143, 6, 536, 126, 127, 81, 1217, 1217,
+ /* 560 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 570 */ 1557, 123, 123, 123, 123, 122, 122, 121, 121, 121,
+ /* 580 */ 120, 117, 448, 489, 1193, 1194, 1193, 486, 283, 1270,
+ /* 590 */ 960, 254, 1193, 375, 508, 505, 504, 1193, 342, 574,
+ /* 600 */ 1193, 574, 411, 294, 503, 960, 879, 193, 484, 318,
+ /* 610 */ 563, 386, 292, 382, 123, 123, 123, 123, 122, 122,
+ /* 620 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 630 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 640 */ 125, 411, 396, 1139, 1193, 872, 101, 286, 286, 1193,
+ /* 650 */ 1194, 1193, 375, 1096, 1193, 1194, 1193, 1193, 1194, 1193,
+ /* 660 */ 569, 459, 33, 375, 235, 126, 127, 81, 1217, 1217,
+ /* 670 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 680 */ 1437, 962, 572, 230, 961, 123, 123, 123, 123, 122,
+ /* 690 */ 122, 121, 121, 121, 120, 117, 448, 1159, 230, 1193,
+ /* 700 */ 158, 1193, 1194, 1193, 1556, 13, 13, 303, 960, 1233,
+ /* 710 */ 1159, 154, 411, 1159, 375, 1584, 1177, 5, 371, 1581,
+ /* 720 */ 431, 1239, 3, 960, 123, 123, 123, 123, 122, 122,
+ /* 730 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 740 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 750 */ 125, 411, 210, 571, 1193, 1032, 1193, 1194, 1193, 1193,
+ /* 760 */ 390, 855, 156, 1555, 376, 404, 1101, 1101, 492, 572,
+ /* 770 */ 469, 344, 1322, 1322, 1555, 126, 127, 81, 1217, 1217,
+ /* 780 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 790 */ 130, 572, 13, 13, 532, 123, 123, 123, 123, 122,
+ /* 800 */ 122, 121, 121, 121, 120, 117, 448, 304, 572, 457,
+ /* 810 */ 229, 1193, 1194, 1193, 13, 13, 1193, 1194, 1193, 1300,
+ /* 820 */ 467, 1270, 411, 1320, 1320, 1555, 1015, 457, 456, 436,
+ /* 830 */ 301, 72, 72, 1268, 123, 123, 123, 123, 122, 122,
+ /* 840 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 850 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 860 */ 125, 411, 384, 1076, 1159, 286, 286, 421, 314, 280,
+ /* 870 */ 280, 287, 287, 461, 408, 407, 1539, 1159, 569, 572,
+ /* 880 */ 1159, 1196, 569, 409, 569, 126, 127, 81, 1217, 1217,
+ /* 890 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125,
+ /* 900 */ 457, 1485, 13, 13, 1541, 123, 123, 123, 123, 122,
+ /* 910 */ 122, 121, 121, 121, 120, 117, 448, 202, 572, 462,
+ /* 920 */ 1587, 578, 2, 1248, 843, 844, 845, 1563, 319, 409,
+ /* 930 */ 147, 6, 411, 257, 256, 255, 208, 1330, 9, 1196,
+ /* 940 */ 264, 72, 72, 1436, 123, 123, 123, 123, 122, 122,
+ /* 950 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217,
+ /* 960 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 970 */ 125, 572, 286, 286, 572, 1213, 411, 577, 315, 1248,
+ /* 980 */ 421, 371, 1581, 356, 319, 569, 147, 495, 529, 1644,
+ /* 990 */ 397, 935, 495, 1330, 71, 71, 934, 72, 72, 242,
+ /* 1000 */ 1328, 105, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124,
+ /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122,
+ /* 1020 */ 122, 121, 121, 121, 120, 117, 448, 1117, 286, 286,
+ /* 1030 */ 1422, 452, 1528, 1213, 443, 286, 286, 1492, 1355, 313,
+ /* 1040 */ 478, 569, 1118, 454, 351, 495, 354, 1266, 569, 209,
+ /* 1050 */ 572, 418, 179, 572, 1031, 242, 385, 1119, 523, 123,
+ /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117,
+ /* 1070 */ 448, 1020, 108, 72, 72, 1019, 13, 13, 915, 572,
+ /* 1080 */ 1498, 572, 286, 286, 98, 530, 1537, 452, 916, 1334,
+ /* 1090 */ 1329, 203, 411, 286, 286, 569, 152, 211, 1498, 1500,
+ /* 1100 */ 426, 569, 56, 56, 57, 57, 569, 1019, 1019, 1021,
+ /* 1110 */ 447, 572, 411, 531, 12, 297, 126, 127, 81, 1217,
+ /* 1120 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 1130 */ 125, 572, 411, 867, 15, 15, 126, 127, 81, 1217,
+ /* 1140 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 1150 */ 125, 373, 529, 264, 44, 44, 126, 115, 81, 1217,
+ /* 1160 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125,
+ /* 1170 */ 125, 1498, 478, 1271, 417, 123, 123, 123, 123, 122,
+ /* 1180 */ 122, 121, 121, 121, 120, 117, 448, 205, 1213, 495,
+ /* 1190 */ 430, 867, 468, 322, 495, 123, 123, 123, 123, 122,
+ /* 1200 */ 122, 121, 121, 121, 120, 117, 448, 572, 557, 1140,
+ /* 1210 */ 1642, 1422, 1642, 543, 572, 123, 123, 123, 123, 122,
+ /* 1220 */ 122, 121, 121, 121, 120, 117, 448, 572, 1422, 572,
+ /* 1230 */ 13, 13, 542, 323, 1325, 411, 334, 58, 58, 349,
+ /* 1240 */ 1422, 1170, 326, 286, 286, 549, 1213, 300, 895, 530,
+ /* 1250 */ 45, 45, 59, 59, 1140, 1643, 569, 1643, 565, 417,
+ /* 1260 */ 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, 124,
+ /* 1270 */ 125, 125, 125, 125, 1367, 373, 500, 290, 1193, 512,
+ /* 1280 */ 1366, 427, 394, 394, 393, 275, 391, 896, 1138, 852,
+ /* 1290 */ 478, 258, 1422, 1170, 463, 1159, 12, 331, 428, 333,
+ /* 1300 */ 1117, 460, 236, 258, 325, 460, 544, 1544, 1159, 1098,
+ /* 1310 */ 491, 1159, 324, 1098, 440, 1118, 335, 516, 123, 123,
+ /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 448,
+ /* 1330 */ 1119, 318, 563, 1138, 572, 1193, 1194, 1193, 112, 564,
+ /* 1340 */ 201, 4, 238, 433, 935, 490, 285, 228, 1517, 934,
+ /* 1350 */ 170, 560, 572, 142, 1516, 567, 572, 60, 60, 572,
+ /* 1360 */ 416, 572, 441, 572, 535, 302, 875, 8, 487, 572,
+ /* 1370 */ 237, 572, 416, 572, 485, 61, 61, 572, 449, 62,
+ /* 1380 */ 62, 332, 63, 63, 46, 46, 47, 47, 361, 572,
+ /* 1390 */ 561, 572, 48, 48, 50, 50, 51, 51, 572, 295,
+ /* 1400 */ 64, 64, 482, 295, 539, 412, 471, 1031, 572, 538,
+ /* 1410 */ 318, 563, 65, 65, 66, 66, 409, 475, 572, 1031,
+ /* 1420 */ 572, 14, 14, 875, 1020, 110, 110, 409, 1019, 572,
+ /* 1430 */ 474, 67, 67, 111, 455, 449, 573, 449, 98, 317,
+ /* 1440 */ 1019, 132, 132, 133, 133, 572, 1561, 572, 974, 409,
+ /* 1450 */ 6, 1562, 68, 68, 1560, 6, 975, 572, 6, 1559,
+ /* 1460 */ 1019, 1019, 1021, 6, 346, 218, 101, 531, 53, 53,
+ /* 1470 */ 69, 69, 1019, 1019, 1021, 1022, 28, 1586, 1181, 451,
+ /* 1480 */ 70, 70, 290, 87, 215, 31, 1363, 394, 394, 393,
+ /* 1490 */ 275, 391, 350, 109, 852, 107, 572, 112, 564, 483,
+ /* 1500 */ 4, 1212, 572, 239, 153, 572, 39, 236, 1299, 325,
+ /* 1510 */ 112, 564, 1298, 4, 567, 572, 32, 324, 572, 54,
+ /* 1520 */ 54, 572, 1135, 353, 398, 165, 165, 567, 166, 166,
+ /* 1530 */ 572, 291, 355, 572, 17, 357, 572, 449, 77, 77,
+ /* 1540 */ 1313, 55, 55, 1297, 73, 73, 572, 238, 470, 561,
+ /* 1550 */ 449, 472, 364, 135, 135, 170, 74, 74, 142, 163,
+ /* 1560 */ 163, 374, 561, 539, 572, 321, 572, 886, 540, 137,
+ /* 1570 */ 137, 339, 1353, 422, 298, 237, 539, 572, 1031, 572,
+ /* 1580 */ 340, 538, 101, 369, 110, 110, 162, 131, 131, 164,
+ /* 1590 */ 164, 1031, 111, 368, 449, 573, 449, 110, 110, 1019,
+ /* 1600 */ 157, 157, 141, 141, 572, 111, 572, 449, 573, 449,
+ /* 1610 */ 412, 288, 1019, 572, 882, 318, 563, 572, 219, 572,
+ /* 1620 */ 241, 1012, 477, 263, 263, 894, 893, 140, 140, 138,
+ /* 1630 */ 138, 1019, 1019, 1021, 1022, 28, 139, 139, 525, 455,
+ /* 1640 */ 76, 76, 78, 78, 1019, 1019, 1021, 1022, 28, 1181,
+ /* 1650 */ 451, 572, 1083, 290, 112, 564, 1575, 4, 394, 394,
+ /* 1660 */ 393, 275, 391, 572, 1023, 852, 572, 479, 345, 263,
+ /* 1670 */ 101, 567, 882, 1376, 75, 75, 1421, 501, 236, 260,
+ /* 1680 */ 325, 112, 564, 359, 4, 101, 43, 43, 324, 49,
+ /* 1690 */ 49, 901, 902, 161, 449, 101, 977, 978, 567, 1079,
+ /* 1700 */ 1349, 260, 965, 932, 263, 114, 561, 1095, 517, 1095,
+ /* 1710 */ 1083, 1094, 865, 1094, 151, 933, 1144, 114, 238, 1361,
+ /* 1720 */ 558, 449, 1023, 559, 1426, 1278, 170, 1269, 1257, 142,
+ /* 1730 */ 1601, 1256, 1258, 561, 1594, 1031, 496, 278, 213, 1346,
+ /* 1740 */ 310, 110, 110, 939, 311, 312, 237, 11, 234, 111,
+ /* 1750 */ 221, 449, 573, 449, 293, 395, 1019, 1408, 337, 1403,
+ /* 1760 */ 1396, 338, 1031, 299, 343, 1413, 1412, 481, 110, 110,
+ /* 1770 */ 506, 402, 225, 1296, 206, 367, 111, 1358, 449, 573,
+ /* 1780 */ 449, 412, 1359, 1019, 1489, 1488, 318, 563, 1019, 1019,
+ /* 1790 */ 1021, 1022, 28, 562, 207, 220, 80, 564, 389, 4,
+ /* 1800 */ 1597, 1357, 552, 1356, 1233, 181, 267, 232, 1536, 1534,
+ /* 1810 */ 455, 1230, 420, 567, 82, 1019, 1019, 1021, 1022, 28,
+ /* 1820 */ 86, 217, 85, 1494, 190, 175, 183, 465, 185, 466,
+ /* 1830 */ 36, 1409, 186, 187, 188, 499, 449, 244, 37, 99,
+ /* 1840 */ 400, 1415, 1414, 488, 1417, 194, 473, 403, 561, 1483,
+ /* 1850 */ 248, 92, 1505, 494, 198, 279, 112, 564, 250, 4,
+ /* 1860 */ 348, 497, 405, 352, 1259, 251, 252, 515, 1316, 434,
+ /* 1870 */ 1315, 1314, 94, 567, 1307, 886, 1306, 1031, 226, 406,
+ /* 1880 */ 1611, 1610, 438, 110, 110, 1580, 1286, 524, 439, 308,
+ /* 1890 */ 266, 111, 1285, 449, 573, 449, 449, 309, 1019, 366,
+ /* 1900 */ 1284, 1609, 265, 1566, 1565, 442, 372, 1381, 561, 129,
+ /* 1910 */ 550, 1380, 10, 1470, 383, 106, 316, 551, 100, 35,
+ /* 1920 */ 534, 575, 212, 1339, 381, 387, 1187, 1338, 274, 276,
+ /* 1930 */ 1019, 1019, 1021, 1022, 28, 277, 413, 1031, 576, 1254,
+ /* 1940 */ 388, 1521, 1249, 110, 110, 167, 1522, 168, 148, 1520,
+ /* 1950 */ 1519, 111, 306, 449, 573, 449, 222, 223, 1019, 839,
+ /* 1960 */ 169, 79, 450, 214, 414, 233, 320, 145, 1093, 1091,
+ /* 1970 */ 328, 182, 171, 1212, 918, 184, 240, 336, 243, 1107,
+ /* 1980 */ 189, 172, 173, 423, 425, 88, 180, 191, 89, 90,
+ /* 1990 */ 1019, 1019, 1021, 1022, 28, 91, 174, 1110, 245, 1106,
+ /* 2000 */ 246, 159, 18, 247, 347, 1099, 263, 195, 1227, 493,
+ /* 2010 */ 249, 196, 38, 854, 498, 368, 253, 360, 897, 197,
+ /* 2020 */ 502, 93, 19, 20, 507, 884, 363, 510, 95, 307,
+ /* 2030 */ 160, 96, 518, 97, 1175, 1060, 1146, 40, 21, 227,
+ /* 2040 */ 176, 1145, 282, 284, 969, 200, 963, 114, 262, 1165,
+ /* 2050 */ 22, 23, 24, 1161, 1169, 25, 1163, 1150, 34, 26,
+ /* 2060 */ 1168, 546, 27, 204, 101, 103, 104, 1074, 7, 1061,
+ /* 2070 */ 1059, 1063, 1116, 1064, 1115, 268, 269, 29, 41, 270,
+ /* 2080 */ 1024, 866, 113, 30, 568, 392, 1183, 144, 178, 1182,
+ /* 2090 */ 271, 928, 1245, 1245, 1245, 1245, 1245, 1245, 1245, 1602,
};
static const YYCODETYPE yy_lookahead[] = {
/* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276,
@@ -168597,7 +171787,7 @@ static const YYCODETYPE yy_lookahead[] = {
/* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
/* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59,
- /* 760 */ 201, 21, 241, 304, 22, 206, 127, 128, 129, 193,
+ /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193,
/* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47,
/* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
/* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106,
@@ -168608,129 +171798,129 @@ static const YYCODETYPE yy_lookahead[] = {
/* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
/* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239,
- /* 870 */ 240, 239, 240, 193, 106, 107, 193, 89, 252, 193,
- /* 880 */ 92, 59, 252, 141, 252, 43, 44, 45, 46, 47,
+ /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193,
+ /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47,
/* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
/* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106,
- /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 16,
- /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 25,
+ /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244,
+ /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254,
/* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117,
- /* 940 */ 24, 216, 217, 263, 102, 103, 104, 105, 106, 107,
+ /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107,
/* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
/* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190,
- /* 980 */ 77, 226, 79, 193, 195, 252, 197, 193, 19, 301,
- /* 990 */ 302, 193, 193, 204, 216, 217, 226, 216, 217, 266,
+ /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301,
+ /* 990 */ 302, 135, 193, 204, 216, 217, 140, 216, 217, 266,
/* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52,
/* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106,
/* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240,
- /* 1030 */ 232, 298, 238, 117, 253, 239, 240, 238, 259, 260,
- /* 1040 */ 193, 252, 27, 31, 193, 193, 142, 204, 252, 193,
- /* 1050 */ 193, 39, 262, 193, 100, 266, 278, 42, 204, 102,
+ /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260,
+ /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262,
+ /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102,
/* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
/* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193,
- /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 238,
+ /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240,
/* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212,
- /* 1100 */ 24, 193, 216, 217, 216, 217, 252, 153, 154, 155,
- /* 1110 */ 253, 16, 19, 144, 213, 268, 43, 44, 45, 46,
+ /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155,
+ /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46,
/* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1130 */ 57, 238, 19, 59, 193, 59, 43, 44, 45, 46,
+ /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46,
/* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1150 */ 57, 22, 23, 193, 25, 193, 43, 44, 45, 46,
+ /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46,
/* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1170 */ 57, 284, 77, 193, 79, 102, 103, 104, 105, 106,
- /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 193, 193,
- /* 1190 */ 193, 117, 291, 117, 232, 102, 103, 104, 105, 106,
- /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 22, 23,
- /* 1210 */ 66, 25, 216, 217, 35, 102, 103, 104, 105, 106,
- /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 268, 85,
- /* 1230 */ 101, 193, 309, 309, 240, 19, 313, 313, 94, 208,
- /* 1240 */ 209, 193, 239, 240, 193, 66, 252, 19, 268, 244,
- /* 1250 */ 216, 217, 193, 74, 213, 252, 161, 19, 263, 254,
+ /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106,
+ /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193,
+ /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106,
+ /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 193, 204, 22,
+ /* 1210 */ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106,
+ /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193,
+ /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238,
+ /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116,
+ /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209,
/* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 1270 */ 54, 55, 56, 57, 193, 216, 217, 5, 59, 193,
- /* 1280 */ 19, 244, 10, 11, 12, 13, 14, 101, 309, 17,
- /* 1290 */ 146, 254, 313, 193, 193, 76, 115, 216, 217, 309,
- /* 1300 */ 12, 263, 30, 313, 32, 46, 87, 46, 89, 130,
- /* 1310 */ 193, 92, 40, 22, 263, 27, 216, 217, 102, 103,
+ /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66,
+ /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 101, 17,
+ /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79,
+ /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29,
+ /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103,
/* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
- /* 1330 */ 42, 150, 291, 216, 217, 116, 117, 118, 19, 20,
- /* 1340 */ 193, 22, 70, 260, 116, 193, 24, 264, 193, 263,
- /* 1350 */ 78, 63, 61, 81, 116, 36, 193, 260, 193, 29,
- /* 1360 */ 193, 264, 193, 33, 145, 193, 59, 48, 216, 217,
- /* 1370 */ 98, 216, 217, 193, 115, 193, 115, 193, 59, 216,
- /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 255, 216, 217,
- /* 1390 */ 71, 193, 131, 193, 25, 65, 216, 217, 216, 217,
- /* 1400 */ 216, 217, 208, 209, 85, 133, 193, 100, 193, 90,
- /* 1410 */ 138, 139, 138, 139, 216, 217, 216, 217, 193, 100,
- /* 1420 */ 193, 108, 135, 116, 117, 106, 107, 140, 121, 216,
- /* 1430 */ 217, 216, 217, 114, 162, 116, 117, 118, 299, 300,
- /* 1440 */ 121, 216, 217, 216, 217, 193, 244, 193, 135, 244,
- /* 1450 */ 193, 256, 257, 140, 244, 193, 254, 193, 193, 254,
- /* 1460 */ 153, 154, 155, 141, 254, 149, 150, 258, 216, 217,
+ /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20,
+ /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140,
+ /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193,
+ /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193,
+ /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216,
+ /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193,
+ /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260,
+ /* 1400 */ 216, 217, 19, 264, 85, 133, 244, 100, 193, 90,
+ /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100,
+ /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193,
+ /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244,
+ /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254,
+ /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309,
+ /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217,
/* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2,
- /* 1480 */ 216, 217, 5, 115, 158, 193, 160, 10, 11, 12,
- /* 1490 */ 13, 14, 193, 59, 17, 126, 193, 19, 20, 129,
- /* 1500 */ 22, 193, 22, 22, 24, 193, 23, 30, 25, 32,
- /* 1510 */ 19, 20, 144, 22, 36, 216, 217, 40, 193, 216,
- /* 1520 */ 217, 193, 152, 129, 216, 217, 193, 36, 216, 217,
- /* 1530 */ 193, 99, 193, 193, 53, 193, 193, 59, 23, 193,
- /* 1540 */ 25, 216, 217, 193, 216, 217, 152, 70, 59, 71,
- /* 1550 */ 59, 117, 193, 216, 217, 78, 216, 217, 81, 216,
- /* 1560 */ 217, 318, 71, 85, 193, 133, 193, 193, 90, 23,
- /* 1570 */ 23, 25, 25, 120, 121, 98, 85, 193, 100, 193,
- /* 1580 */ 23, 90, 25, 121, 106, 107, 19, 216, 217, 216,
+ /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12,
+ /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116,
+ /* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32,
+ /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216,
+ /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217,
+ /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217,
+ /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71,
+ /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216,
+ /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216,
+ /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193,
+ /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216,
/* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121,
- /* 1600 */ 216, 217, 216, 217, 193, 114, 117, 116, 117, 118,
- /* 1610 */ 133, 193, 121, 193, 193, 138, 139, 193, 23, 193,
- /* 1620 */ 25, 23, 23, 25, 25, 7, 8, 216, 217, 193,
- /* 1630 */ 193, 153, 154, 155, 156, 157, 216, 217, 193, 162,
+ /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118,
+ /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193,
+ /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216,
+ /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162,
/* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1,
- /* 1650 */ 2, 193, 193, 5, 19, 20, 59, 22, 10, 11,
- /* 1660 */ 12, 13, 14, 193, 97, 17, 193, 23, 193, 25,
- /* 1670 */ 288, 36, 193, 242, 216, 217, 236, 23, 30, 25,
+ /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11,
+ /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25,
+ /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25,
/* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216,
- /* 1690 */ 217, 216, 217, 193, 59, 216, 217, 193, 36, 83,
- /* 1700 */ 84, 153, 153, 155, 155, 23, 71, 25, 23, 193,
- /* 1710 */ 25, 193, 193, 193, 117, 193, 193, 193, 70, 193,
- /* 1720 */ 193, 59, 193, 255, 255, 287, 78, 255, 243, 81,
- /* 1730 */ 191, 255, 297, 71, 271, 100, 293, 245, 267, 214,
- /* 1740 */ 246, 106, 107, 108, 246, 271, 98, 245, 293, 114,
- /* 1750 */ 220, 116, 117, 118, 267, 271, 121, 271, 225, 219,
- /* 1760 */ 229, 219, 100, 219, 259, 259, 259, 259, 106, 107,
- /* 1770 */ 249, 196, 60, 280, 141, 243, 114, 249, 116, 117,
- /* 1780 */ 118, 133, 245, 121, 200, 297, 138, 139, 153, 154,
- /* 1790 */ 155, 156, 157, 297, 200, 38, 19, 20, 151, 22,
- /* 1800 */ 200, 150, 140, 294, 294, 22, 272, 43, 234, 18,
- /* 1810 */ 162, 270, 200, 36, 237, 153, 154, 155, 156, 157,
- /* 1820 */ 237, 283, 237, 237, 18, 199, 149, 246, 272, 270,
- /* 1830 */ 272, 200, 158, 246, 246, 234, 59, 234, 246, 199,
- /* 1840 */ 290, 62, 289, 200, 199, 22, 221, 115, 71, 200,
- /* 1850 */ 200, 199, 199, 221, 218, 218, 19, 20, 64, 22,
- /* 1860 */ 218, 227, 22, 224, 126, 224, 165, 221, 24, 305,
- /* 1870 */ 200, 113, 312, 36, 218, 220, 218, 100, 282, 218,
- /* 1880 */ 91, 218, 317, 106, 107, 221, 227, 282, 317, 82,
- /* 1890 */ 148, 114, 265, 116, 117, 118, 59, 145, 121, 22,
- /* 1900 */ 277, 158, 200, 265, 25, 202, 147, 250, 71, 279,
- /* 1910 */ 13, 146, 194, 194, 249, 248, 250, 140, 247, 246,
- /* 1920 */ 6, 192, 192, 192, 303, 303, 213, 207, 300, 213,
- /* 1930 */ 153, 154, 155, 156, 157, 213, 213, 100, 213, 222,
- /* 1940 */ 207, 214, 214, 106, 107, 4, 222, 207, 3, 22,
- /* 1950 */ 163, 114, 15, 116, 117, 118, 16, 23, 121, 23,
- /* 1960 */ 139, 151, 130, 25, 142, 16, 24, 20, 144, 1,
- /* 1970 */ 142, 130, 130, 61, 53, 53, 37, 151, 53, 53,
- /* 1980 */ 130, 116, 34, 1, 141, 5, 22, 115, 161, 141,
- /* 1990 */ 153, 154, 155, 156, 157, 25, 68, 68, 75, 41,
- /* 2000 */ 115, 24, 131, 20, 19, 125, 22, 96, 22, 22,
- /* 2010 */ 67, 23, 22, 67, 59, 24, 22, 28, 67, 23,
- /* 2020 */ 22, 22, 149, 23, 23, 23, 116, 23, 25, 37,
- /* 2030 */ 97, 141, 23, 23, 22, 143, 25, 75, 88, 34,
- /* 2040 */ 34, 34, 34, 86, 75, 93, 23, 34, 22, 34,
- /* 2050 */ 25, 24, 34, 25, 23, 142, 23, 142, 44, 23,
- /* 2060 */ 23, 23, 11, 23, 25, 22, 22, 22, 15, 23,
- /* 2070 */ 23, 22, 22, 25, 1, 1, 141, 25, 23, 135,
- /* 2080 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2090 */ 319, 319, 319, 319, 141, 141, 319, 319, 319, 319,
+ /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23,
+ /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155,
+ /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193,
+ /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81,
+ /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255,
+ /* 1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114,
+ /* 1750 */ 214, 116, 117, 118, 245, 191, 121, 271, 293, 267,
+ /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107,
+ /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117,
+ /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154,
+ /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22,
+ /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200,
+ /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157,
+ /* 1820 */ 151, 150, 294, 283, 22, 43, 234, 18, 237, 200,
+ /* 1830 */ 270, 272, 237, 237, 237, 18, 59, 199, 270, 149,
+ /* 1840 */ 246, 272, 272, 200, 234, 234, 246, 246, 71, 246,
+ /* 1850 */ 199, 158, 290, 62, 22, 200, 19, 20, 199, 22,
+ /* 1860 */ 289, 221, 221, 200, 200, 199, 199, 115, 218, 64,
+ /* 1870 */ 218, 218, 22, 36, 227, 126, 227, 100, 165, 221,
+ /* 1880 */ 224, 224, 24, 106, 107, 312, 218, 305, 113, 282,
+ /* 1890 */ 91, 114, 220, 116, 117, 118, 59, 282, 121, 218,
+ /* 1900 */ 218, 218, 200, 317, 317, 82, 221, 265, 71, 148,
+ /* 1910 */ 145, 265, 22, 277, 200, 158, 279, 140, 147, 25,
+ /* 1920 */ 146, 202, 248, 250, 249, 247, 13, 250, 194, 194,
+ /* 1930 */ 153, 154, 155, 156, 157, 6, 303, 100, 192, 192,
+ /* 1940 */ 246, 213, 192, 106, 107, 207, 213, 207, 222, 213,
+ /* 1950 */ 213, 114, 222, 116, 117, 118, 214, 214, 121, 4,
+ /* 1960 */ 207, 213, 3, 22, 303, 15, 163, 16, 23, 23,
+ /* 1970 */ 139, 151, 130, 25, 20, 142, 24, 16, 144, 1,
+ /* 1980 */ 142, 130, 130, 61, 37, 53, 300, 151, 53, 53,
+ /* 1990 */ 153, 154, 155, 156, 157, 53, 130, 116, 34, 1,
+ /* 2000 */ 141, 5, 22, 115, 161, 68, 25, 68, 75, 41,
+ /* 2010 */ 141, 115, 24, 20, 19, 131, 125, 23, 28, 22,
+ /* 2020 */ 67, 22, 22, 22, 67, 59, 24, 96, 22, 67,
+ /* 2030 */ 23, 149, 22, 25, 23, 23, 23, 22, 34, 141,
+ /* 2040 */ 37, 97, 23, 23, 116, 22, 143, 25, 34, 75,
+ /* 2050 */ 34, 34, 34, 88, 75, 34, 86, 23, 22, 34,
+ /* 2060 */ 93, 24, 34, 25, 25, 142, 142, 23, 44, 23,
+ /* 2070 */ 23, 23, 23, 11, 23, 25, 22, 22, 22, 141,
+ /* 2080 */ 23, 23, 22, 22, 25, 15, 1, 23, 25, 1,
+ /* 2090 */ 141, 135, 319, 319, 319, 319, 319, 319, 319, 141,
/* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
@@ -168749,176 +171939,177 @@ static const YYCODETYPE yy_lookahead[] = {
/* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2280 */ 319,
+ /* 2280 */ 319, 319, 319, 319, 319,
};
-#define YY_SHIFT_COUNT (574)
+#define YY_SHIFT_COUNT (578)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (2074)
+#define YY_SHIFT_MAX (2088)
static const unsigned short int yy_shift_ofst[] = {
/* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837,
/* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837,
/* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
- /* 30 */ 271, 271, 1219, 1219, 216, 88, 1, 1, 1, 1,
- /* 40 */ 1, 40, 111, 258, 361, 469, 512, 583, 622, 693,
- /* 50 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093,
+ /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1,
+ /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622,
+ /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093,
/* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093,
- /* 70 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, 1662,
- /* 80 */ 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 70 */ 1093, 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635,
+ /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
/* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
- /* 130 */ 137, 181, 181, 181, 181, 181, 181, 181, 94, 430,
- /* 140 */ 66, 65, 112, 366, 533, 533, 740, 1261, 533, 533,
- /* 150 */ 79, 79, 533, 412, 412, 412, 77, 412, 123, 113,
- /* 160 */ 113, 22, 22, 2096, 2096, 328, 328, 328, 239, 468,
- /* 170 */ 468, 468, 468, 1015, 1015, 409, 366, 1129, 1186, 533,
- /* 180 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
- /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 969,
- /* 200 */ 621, 621, 533, 642, 788, 788, 1228, 1228, 822, 822,
- /* 210 */ 67, 1274, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 1307,
- /* 220 */ 954, 954, 585, 472, 640, 387, 695, 538, 541, 700,
- /* 230 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
- /* 240 */ 222, 533, 533, 533, 533, 533, 533, 533, 533, 533,
- /* 250 */ 533, 533, 533, 1179, 1179, 1179, 533, 533, 533, 565,
- /* 260 */ 533, 533, 533, 916, 1144, 533, 533, 1288, 533, 533,
- /* 270 */ 533, 533, 533, 533, 533, 533, 639, 1330, 209, 1076,
- /* 280 */ 1076, 1076, 1076, 580, 209, 209, 1313, 768, 917, 649,
- /* 290 */ 1181, 1316, 405, 1316, 1238, 249, 1181, 1181, 249, 1181,
- /* 300 */ 405, 1238, 1369, 464, 1259, 1012, 1012, 1012, 1368, 1368,
- /* 310 */ 1368, 1368, 184, 184, 1326, 904, 1287, 1480, 1712, 1712,
- /* 320 */ 1633, 1633, 1757, 1757, 1633, 1647, 1651, 1783, 1764, 1791,
- /* 330 */ 1791, 1791, 1791, 1633, 1806, 1677, 1651, 1651, 1677, 1783,
- /* 340 */ 1764, 1677, 1764, 1677, 1633, 1806, 1674, 1779, 1633, 1806,
- /* 350 */ 1823, 1633, 1806, 1633, 1806, 1823, 1732, 1732, 1732, 1794,
- /* 360 */ 1840, 1840, 1823, 1732, 1738, 1732, 1794, 1732, 1732, 1701,
- /* 370 */ 1844, 1758, 1758, 1823, 1633, 1789, 1789, 1807, 1807, 1742,
- /* 380 */ 1752, 1877, 1633, 1743, 1742, 1759, 1765, 1677, 1879, 1897,
- /* 390 */ 1897, 1914, 1914, 1914, 2096, 2096, 2096, 2096, 2096, 2096,
- /* 400 */ 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 2096, 207,
- /* 410 */ 1095, 331, 620, 903, 806, 1074, 1483, 1432, 1481, 1322,
- /* 420 */ 1370, 1394, 1515, 1291, 1546, 1547, 1557, 1595, 1598, 1599,
- /* 430 */ 1434, 1453, 1618, 1462, 1567, 1489, 1644, 1654, 1616, 1660,
- /* 440 */ 1548, 1549, 1682, 1685, 1597, 742, 1941, 1945, 1927, 1787,
- /* 450 */ 1937, 1940, 1934, 1936, 1821, 1810, 1832, 1938, 1938, 1942,
- /* 460 */ 1822, 1947, 1824, 1949, 1968, 1828, 1841, 1938, 1842, 1912,
- /* 470 */ 1939, 1938, 1826, 1921, 1922, 1925, 1926, 1850, 1865, 1948,
- /* 480 */ 1843, 1982, 1980, 1964, 1872, 1827, 1928, 1970, 1929, 1923,
- /* 490 */ 1958, 1848, 1885, 1977, 1983, 1985, 1871, 1880, 1984, 1943,
- /* 500 */ 1986, 1987, 1988, 1990, 1946, 1955, 1991, 1911, 1989, 1994,
- /* 510 */ 1951, 1992, 1996, 1873, 1998, 2000, 2001, 2002, 2003, 2004,
- /* 520 */ 1999, 1933, 1890, 2009, 2010, 1910, 2005, 2012, 1892, 2011,
- /* 530 */ 2006, 2007, 2008, 2013, 1950, 1962, 1957, 2014, 1969, 1952,
- /* 540 */ 2015, 2023, 2026, 2027, 2025, 2028, 2018, 1913, 1915, 2031,
- /* 550 */ 2011, 2033, 2036, 2037, 2038, 2039, 2040, 2043, 2051, 2044,
- /* 560 */ 2045, 2046, 2047, 2049, 2050, 2048, 1944, 1935, 1953, 1954,
- /* 570 */ 2052, 2055, 2053, 2073, 2074,
+ /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94,
+ /* 140 */ 430, 66, 65, 112, 366, 533, 533, 740, 1257, 533,
+ /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123,
+ /* 160 */ 113, 113, 113, 22, 22, 2100, 2100, 328, 328, 328,
+ /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187,
+ /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133,
+ /* 210 */ 822, 822, 67, 1193, 2100, 2100, 2100, 2100, 2100, 2100,
+ /* 220 */ 2100, 1307, 954, 954, 585, 472, 640, 387, 695, 538,
+ /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533,
+ /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533,
+ /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288,
+ /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280,
+ /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768,
+ /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315,
+ /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417,
+ /* 310 */ 1323, 1323, 1323, 1323, 184, 184, 1335, 1476, 856, 1482,
+ /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802,
+ /* 330 */ 1782, 1809, 1809, 1809, 1809, 1665, 1817, 1690, 1671, 1671,
+ /* 340 */ 1690, 1802, 1782, 1690, 1782, 1690, 1665, 1817, 1693, 1791,
+ /* 350 */ 1665, 1817, 1832, 1665, 1817, 1665, 1817, 1832, 1752, 1752,
+ /* 360 */ 1752, 1805, 1850, 1850, 1832, 1752, 1749, 1752, 1805, 1752,
+ /* 370 */ 1752, 1713, 1858, 1775, 1775, 1832, 1665, 1799, 1799, 1823,
+ /* 380 */ 1823, 1761, 1765, 1890, 1665, 1757, 1761, 1771, 1774, 1690,
+ /* 390 */ 1894, 1913, 1913, 1929, 1929, 1929, 2100, 2100, 2100, 2100,
+ /* 400 */ 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100,
+ /* 410 */ 2100, 207, 1220, 331, 620, 967, 806, 1074, 1499, 1432,
+ /* 420 */ 1463, 1479, 1419, 1422, 1557, 1512, 1598, 1599, 1644, 1645,
+ /* 430 */ 1654, 1660, 1555, 1505, 1684, 1462, 1670, 1563, 1619, 1593,
+ /* 440 */ 1676, 1679, 1613, 1680, 1554, 1558, 1689, 1692, 1605, 1589,
+ /* 450 */ 1955, 1959, 1941, 1803, 1950, 1951, 1945, 1946, 1831, 1820,
+ /* 460 */ 1842, 1948, 1948, 1952, 1833, 1954, 1834, 1961, 1978, 1838,
+ /* 470 */ 1851, 1948, 1852, 1922, 1947, 1948, 1836, 1932, 1935, 1936,
+ /* 480 */ 1942, 1866, 1881, 1964, 1859, 1998, 1996, 1980, 1888, 1843,
+ /* 490 */ 1937, 1981, 1939, 1933, 1968, 1869, 1896, 1988, 1993, 1995,
+ /* 500 */ 1884, 1891, 1997, 1953, 1999, 2000, 1994, 2001, 1957, 1966,
+ /* 510 */ 2002, 1931, 1990, 2006, 1962, 2003, 2007, 2004, 1882, 2010,
+ /* 520 */ 2011, 2012, 2008, 2013, 2015, 1944, 1898, 2019, 2020, 1928,
+ /* 530 */ 2014, 2023, 1903, 2022, 2016, 2017, 2018, 2021, 1965, 1974,
+ /* 540 */ 1970, 2024, 1979, 1967, 2025, 2034, 2036, 2037, 2038, 2039,
+ /* 550 */ 2028, 1923, 1924, 2044, 2022, 2046, 2047, 2048, 2049, 2050,
+ /* 560 */ 2051, 2054, 2062, 2055, 2056, 2057, 2058, 2060, 2061, 2059,
+ /* 570 */ 1956, 1938, 1949, 1958, 2063, 2064, 2070, 2085, 2088,
};
-#define YY_REDUCE_COUNT (408)
+#define YY_REDUCE_COUNT (410)
#define YY_REDUCE_MIN (-271)
-#define YY_REDUCE_MAX (1740)
+#define YY_REDUCE_MAX (1753)
static const short yy_reduce_ofst[] = {
/* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187,
/* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489,
- /* 20 */ 576, -175, 598, 686, 615, 725, 860, 778, 781, 857,
- /* 30 */ 616, 887, 87, 240, -192, 408, 626, 796, 843, 854,
- /* 40 */ 1003, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 778, 781,
+ /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843,
+ /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271,
/* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
/* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
- /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, 80, 83,
- /* 80 */ 313, 886, 888, 996, 1034, 1059, 1081, 1100, 1117, 1152,
- /* 90 */ 1155, 1163, 1165, 1167, 1169, 1172, 1180, 1182, 1184, 1198,
- /* 100 */ 1200, 1213, 1215, 1225, 1227, 1252, 1254, 1264, 1299, 1303,
- /* 110 */ 1308, 1312, 1325, 1328, 1337, 1340, 1343, 1371, 1373, 1384,
- /* 120 */ 1386, 1411, 1420, 1424, 1426, 1458, 1470, 1473, 1475, 1479,
- /* 130 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
- /* 140 */ -271, 138, 459, 396, -158, 470, 302, -212, 521, 201,
- /* 150 */ -195, -92, 559, 630, 632, 630, -271, 632, 901, 63,
- /* 160 */ 407, -271, -271, -271, -271, 161, 161, 161, 251, 335,
- /* 170 */ 847, 960, 980, 537, 588, 618, 628, 688, 688, -166,
- /* 180 */ -161, 674, 790, 794, 799, 851, 852, -122, 680, -120,
- /* 190 */ 995, 1038, 415, 1051, 893, 798, 962, 400, 1086, 779,
- /* 200 */ 923, 924, 263, 1041, 979, 990, 1083, 1097, 1031, 1194,
- /* 210 */ 362, 994, 1139, 1005, 1037, 1202, 1205, 1195, 1210, -194,
- /* 220 */ 56, 185, -135, 232, 522, 560, 601, 617, 669, 683,
- /* 230 */ 711, 856, 908, 941, 1048, 1101, 1147, 1257, 1262, 1265,
- /* 240 */ 392, 1292, 1333, 1339, 1342, 1346, 1350, 1359, 1374, 1418,
- /* 250 */ 1421, 1436, 1437, 593, 755, 770, 997, 1445, 1459, 1209,
- /* 260 */ 1500, 1504, 1516, 1132, 1243, 1518, 1519, 1440, 1520, 560,
- /* 270 */ 1522, 1523, 1524, 1526, 1527, 1529, 1382, 1438, 1431, 1468,
- /* 280 */ 1469, 1472, 1476, 1209, 1431, 1431, 1485, 1525, 1539, 1435,
- /* 290 */ 1463, 1471, 1492, 1487, 1443, 1494, 1474, 1484, 1498, 1486,
- /* 300 */ 1502, 1455, 1530, 1531, 1533, 1540, 1542, 1544, 1505, 1506,
- /* 310 */ 1507, 1508, 1521, 1528, 1493, 1537, 1532, 1575, 1488, 1496,
- /* 320 */ 1584, 1594, 1509, 1510, 1600, 1538, 1534, 1541, 1574, 1577,
- /* 330 */ 1583, 1585, 1586, 1612, 1626, 1581, 1556, 1558, 1587, 1559,
- /* 340 */ 1601, 1588, 1603, 1592, 1631, 1640, 1550, 1553, 1643, 1645,
- /* 350 */ 1625, 1649, 1652, 1650, 1653, 1632, 1636, 1637, 1642, 1634,
- /* 360 */ 1639, 1641, 1646, 1656, 1655, 1658, 1659, 1661, 1663, 1560,
- /* 370 */ 1564, 1596, 1605, 1664, 1670, 1565, 1571, 1627, 1638, 1657,
- /* 380 */ 1665, 1623, 1702, 1630, 1666, 1667, 1671, 1673, 1703, 1718,
- /* 390 */ 1719, 1729, 1730, 1731, 1621, 1622, 1628, 1720, 1713, 1716,
- /* 400 */ 1722, 1723, 1733, 1717, 1724, 1727, 1728, 1725, 1740,
+ /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80,
+ /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141,
+ /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196,
+ /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303,
+ /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371,
+ /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470,
+ /* 130 */ 1473, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521,
+ /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901,
+ /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161,
+ /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688,
+ /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122,
+ /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400,
+ /* 200 */ 1099, 779, 1137, 1142, 263, 1083, 1145, 1150, 1041, 1139,
+ /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090,
+ /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601,
+ /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081,
+ /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293,
+ /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359,
+ /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487,
+ /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450,
+ /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536,
+ /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495,
+ /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566,
+ /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 1604,
+ /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560,
+ /* 330 */ 1592, 1591, 1595, 1596, 1597, 1629, 1638, 1594, 1569, 1570,
+ /* 340 */ 1600, 1568, 1610, 1601, 1611, 1603, 1643, 1651, 1562, 1571,
+ /* 350 */ 1655, 1659, 1640, 1663, 1666, 1664, 1667, 1641, 1650, 1652,
+ /* 360 */ 1653, 1647, 1656, 1657, 1658, 1668, 1672, 1681, 1649, 1682,
+ /* 370 */ 1683, 1573, 1582, 1607, 1615, 1685, 1702, 1586, 1587, 1642,
+ /* 380 */ 1646, 1673, 1675, 1636, 1714, 1637, 1677, 1674, 1678, 1694,
+ /* 390 */ 1719, 1734, 1735, 1746, 1747, 1750, 1633, 1661, 1686, 1738,
+ /* 400 */ 1728, 1733, 1736, 1737, 1740, 1726, 1730, 1742, 1743, 1748,
+ /* 410 */ 1753,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1641, 1641, 1641, 1469, 1236, 1347, 1236, 1236, 1236, 1469,
- /* 10 */ 1469, 1469, 1236, 1377, 1377, 1522, 1269, 1236, 1236, 1236,
- /* 20 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1468, 1236, 1236,
- /* 30 */ 1236, 1236, 1557, 1557, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 40 */ 1236, 1236, 1386, 1236, 1393, 1236, 1236, 1236, 1236, 1236,
- /* 50 */ 1470, 1471, 1236, 1236, 1236, 1521, 1523, 1486, 1400, 1399,
- /* 60 */ 1398, 1397, 1504, 1365, 1391, 1384, 1388, 1465, 1466, 1464,
- /* 70 */ 1619, 1471, 1470, 1236, 1387, 1433, 1449, 1432, 1236, 1236,
- /* 80 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 90 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 100 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 110 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 120 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 130 */ 1441, 1448, 1447, 1446, 1455, 1445, 1442, 1435, 1434, 1436,
- /* 140 */ 1437, 1236, 1236, 1260, 1236, 1236, 1257, 1311, 1236, 1236,
- /* 150 */ 1236, 1236, 1236, 1541, 1540, 1236, 1438, 1236, 1269, 1427,
- /* 160 */ 1426, 1452, 1439, 1451, 1450, 1529, 1593, 1592, 1487, 1236,
- /* 170 */ 1236, 1236, 1236, 1236, 1236, 1557, 1236, 1236, 1236, 1236,
- /* 180 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 190 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1367,
- /* 200 */ 1557, 1557, 1236, 1269, 1557, 1557, 1368, 1368, 1265, 1265,
- /* 210 */ 1371, 1236, 1536, 1338, 1338, 1338, 1338, 1347, 1338, 1236,
- /* 220 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 230 */ 1236, 1236, 1236, 1236, 1526, 1524, 1236, 1236, 1236, 1236,
- /* 240 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 250 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 260 */ 1236, 1236, 1236, 1343, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 270 */ 1236, 1236, 1236, 1236, 1236, 1586, 1236, 1499, 1325, 1343,
- /* 280 */ 1343, 1343, 1343, 1345, 1326, 1324, 1337, 1270, 1243, 1633,
- /* 290 */ 1403, 1392, 1344, 1392, 1630, 1390, 1403, 1403, 1390, 1403,
- /* 300 */ 1344, 1630, 1286, 1608, 1281, 1377, 1377, 1377, 1367, 1367,
- /* 310 */ 1367, 1367, 1371, 1371, 1467, 1344, 1337, 1236, 1633, 1633,
- /* 320 */ 1353, 1353, 1632, 1632, 1353, 1487, 1616, 1412, 1314, 1320,
- /* 330 */ 1320, 1320, 1320, 1353, 1254, 1390, 1616, 1616, 1390, 1412,
- /* 340 */ 1314, 1390, 1314, 1390, 1353, 1254, 1503, 1627, 1353, 1254,
- /* 350 */ 1477, 1353, 1254, 1353, 1254, 1477, 1312, 1312, 1312, 1301,
- /* 360 */ 1236, 1236, 1477, 1312, 1286, 1312, 1301, 1312, 1312, 1575,
- /* 370 */ 1236, 1481, 1481, 1477, 1353, 1567, 1567, 1380, 1380, 1385,
- /* 380 */ 1371, 1472, 1353, 1236, 1385, 1383, 1381, 1390, 1304, 1589,
- /* 390 */ 1589, 1585, 1585, 1585, 1638, 1638, 1536, 1601, 1269, 1269,
- /* 400 */ 1269, 1269, 1601, 1288, 1288, 1270, 1270, 1269, 1601, 1236,
- /* 410 */ 1236, 1236, 1236, 1236, 1236, 1596, 1236, 1531, 1488, 1357,
- /* 420 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 430 */ 1236, 1236, 1236, 1236, 1542, 1236, 1236, 1236, 1236, 1236,
- /* 440 */ 1236, 1236, 1236, 1236, 1236, 1417, 1236, 1239, 1533, 1236,
- /* 450 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1394, 1395, 1358,
- /* 460 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1409, 1236, 1236,
- /* 470 */ 1236, 1404, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 480 */ 1629, 1236, 1236, 1236, 1236, 1236, 1236, 1502, 1501, 1236,
- /* 490 */ 1236, 1355, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 500 */ 1236, 1236, 1236, 1236, 1236, 1284, 1236, 1236, 1236, 1236,
- /* 510 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 520 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1382,
- /* 530 */ 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 540 */ 1236, 1236, 1236, 1236, 1572, 1372, 1236, 1236, 1236, 1236,
- /* 550 */ 1620, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236, 1236,
- /* 560 */ 1236, 1236, 1236, 1236, 1236, 1612, 1328, 1418, 1236, 1421,
- /* 570 */ 1258, 1236, 1248, 1236, 1236,
+ /* 0 */ 1648, 1648, 1648, 1478, 1243, 1354, 1243, 1243, 1243, 1478,
+ /* 10 */ 1478, 1478, 1243, 1384, 1384, 1531, 1276, 1243, 1243, 1243,
+ /* 20 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1477, 1243,
+ /* 30 */ 1243, 1243, 1243, 1564, 1564, 1243, 1243, 1243, 1243, 1243,
+ /* 40 */ 1243, 1243, 1243, 1393, 1243, 1400, 1243, 1243, 1243, 1243,
+ /* 50 */ 1243, 1479, 1480, 1243, 1243, 1243, 1530, 1532, 1495, 1407,
+ /* 60 */ 1406, 1405, 1404, 1513, 1372, 1398, 1391, 1395, 1474, 1475,
+ /* 70 */ 1473, 1626, 1480, 1479, 1243, 1394, 1442, 1458, 1441, 1243,
+ /* 80 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 90 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 100 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 110 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 120 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 130 */ 1243, 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443,
+ /* 140 */ 1445, 1446, 1243, 1243, 1267, 1243, 1243, 1264, 1318, 1243,
+ /* 150 */ 1243, 1243, 1243, 1243, 1550, 1549, 1243, 1447, 1243, 1276,
+ /* 160 */ 1435, 1434, 1433, 1461, 1448, 1460, 1459, 1538, 1600, 1599,
+ /* 170 */ 1496, 1243, 1243, 1243, 1243, 1243, 1243, 1564, 1243, 1243,
+ /* 180 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 190 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 200 */ 1243, 1374, 1564, 1564, 1243, 1276, 1564, 1564, 1375, 1375,
+ /* 210 */ 1272, 1272, 1378, 1243, 1545, 1345, 1345, 1345, 1345, 1354,
+ /* 220 */ 1345, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 230 */ 1243, 1243, 1243, 1243, 1243, 1243, 1535, 1533, 1243, 1243,
+ /* 240 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 250 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 260 */ 1243, 1243, 1243, 1243, 1243, 1350, 1243, 1243, 1243, 1243,
+ /* 270 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1593, 1243, 1508,
+ /* 280 */ 1332, 1350, 1350, 1350, 1350, 1352, 1333, 1331, 1344, 1277,
+ /* 290 */ 1250, 1640, 1410, 1399, 1351, 1399, 1637, 1397, 1410, 1410,
+ /* 300 */ 1397, 1410, 1351, 1637, 1293, 1615, 1288, 1384, 1384, 1384,
+ /* 310 */ 1374, 1374, 1374, 1374, 1378, 1378, 1476, 1351, 1344, 1243,
+ /* 320 */ 1640, 1640, 1360, 1360, 1639, 1639, 1360, 1496, 1623, 1419,
+ /* 330 */ 1321, 1327, 1327, 1327, 1327, 1360, 1261, 1397, 1623, 1623,
+ /* 340 */ 1397, 1419, 1321, 1397, 1321, 1397, 1360, 1261, 1512, 1634,
+ /* 350 */ 1360, 1261, 1486, 1360, 1261, 1360, 1261, 1486, 1319, 1319,
+ /* 360 */ 1319, 1308, 1243, 1243, 1486, 1319, 1293, 1319, 1308, 1319,
+ /* 370 */ 1319, 1582, 1243, 1490, 1490, 1486, 1360, 1574, 1574, 1387,
+ /* 380 */ 1387, 1392, 1378, 1481, 1360, 1243, 1392, 1390, 1388, 1397,
+ /* 390 */ 1311, 1596, 1596, 1592, 1592, 1592, 1645, 1645, 1545, 1608,
+ /* 400 */ 1276, 1276, 1276, 1276, 1608, 1295, 1295, 1277, 1277, 1276,
+ /* 410 */ 1608, 1243, 1243, 1243, 1243, 1243, 1243, 1603, 1243, 1540,
+ /* 420 */ 1497, 1364, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 430 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1551, 1243,
+ /* 440 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1424,
+ /* 450 */ 1243, 1246, 1542, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 460 */ 1243, 1401, 1402, 1365, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 470 */ 1243, 1416, 1243, 1243, 1243, 1411, 1243, 1243, 1243, 1243,
+ /* 480 */ 1243, 1243, 1243, 1243, 1636, 1243, 1243, 1243, 1243, 1243,
+ /* 490 */ 1243, 1511, 1510, 1243, 1243, 1362, 1243, 1243, 1243, 1243,
+ /* 500 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1291,
+ /* 510 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 520 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 530 */ 1243, 1243, 1243, 1389, 1243, 1243, 1243, 1243, 1243, 1243,
+ /* 540 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1579, 1379,
+ /* 550 */ 1243, 1243, 1243, 1243, 1627, 1243, 1243, 1243, 1243, 1243,
+ /* 560 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1619,
+ /* 570 */ 1335, 1425, 1243, 1428, 1265, 1243, 1255, 1243, 1243,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -169725,135 +172916,135 @@ static const char *const yyRuleName[] = {
/* 185 */ "expr ::= expr COLLATE ID|STRING",
/* 186 */ "expr ::= CAST LP expr AS typetoken RP",
/* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP",
- /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP",
- /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over",
- /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over",
- /* 191 */ "term ::= CTIME_KW",
- /* 192 */ "expr ::= LP nexprlist COMMA expr RP",
- /* 193 */ "expr ::= expr AND expr",
- /* 194 */ "expr ::= expr OR expr",
- /* 195 */ "expr ::= expr LT|GT|GE|LE expr",
- /* 196 */ "expr ::= expr EQ|NE expr",
- /* 197 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
- /* 198 */ "expr ::= expr PLUS|MINUS expr",
- /* 199 */ "expr ::= expr STAR|SLASH|REM expr",
- /* 200 */ "expr ::= expr CONCAT expr",
- /* 201 */ "likeop ::= NOT LIKE_KW|MATCH",
- /* 202 */ "expr ::= expr likeop expr",
- /* 203 */ "expr ::= expr likeop expr ESCAPE expr",
- /* 204 */ "expr ::= expr ISNULL|NOTNULL",
- /* 205 */ "expr ::= expr NOT NULL",
- /* 206 */ "expr ::= expr IS expr",
- /* 207 */ "expr ::= expr IS NOT expr",
- /* 208 */ "expr ::= expr IS NOT DISTINCT FROM expr",
- /* 209 */ "expr ::= expr IS DISTINCT FROM expr",
- /* 210 */ "expr ::= NOT expr",
- /* 211 */ "expr ::= BITNOT expr",
- /* 212 */ "expr ::= PLUS|MINUS expr",
- /* 213 */ "expr ::= expr PTR expr",
- /* 214 */ "between_op ::= BETWEEN",
- /* 215 */ "between_op ::= NOT BETWEEN",
- /* 216 */ "expr ::= expr between_op expr AND expr",
- /* 217 */ "in_op ::= IN",
- /* 218 */ "in_op ::= NOT IN",
- /* 219 */ "expr ::= expr in_op LP exprlist RP",
- /* 220 */ "expr ::= LP select RP",
- /* 221 */ "expr ::= expr in_op LP select RP",
- /* 222 */ "expr ::= expr in_op nm dbnm paren_exprlist",
- /* 223 */ "expr ::= EXISTS LP select RP",
- /* 224 */ "expr ::= CASE case_operand case_exprlist case_else END",
- /* 225 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
- /* 226 */ "case_exprlist ::= WHEN expr THEN expr",
- /* 227 */ "case_else ::= ELSE expr",
- /* 228 */ "case_else ::=",
- /* 229 */ "case_operand ::=",
- /* 230 */ "exprlist ::=",
- /* 231 */ "nexprlist ::= nexprlist COMMA expr",
- /* 232 */ "nexprlist ::= expr",
- /* 233 */ "paren_exprlist ::=",
- /* 234 */ "paren_exprlist ::= LP exprlist RP",
- /* 235 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
- /* 236 */ "uniqueflag ::= UNIQUE",
- /* 237 */ "uniqueflag ::=",
- /* 238 */ "eidlist_opt ::=",
- /* 239 */ "eidlist_opt ::= LP eidlist RP",
- /* 240 */ "eidlist ::= eidlist COMMA nm collate sortorder",
- /* 241 */ "eidlist ::= nm collate sortorder",
- /* 242 */ "collate ::=",
- /* 243 */ "collate ::= COLLATE ID|STRING",
- /* 244 */ "cmd ::= DROP INDEX ifexists fullname",
- /* 245 */ "cmd ::= VACUUM vinto",
- /* 246 */ "cmd ::= VACUUM nm vinto",
- /* 247 */ "vinto ::= INTO expr",
- /* 248 */ "vinto ::=",
- /* 249 */ "cmd ::= PRAGMA nm dbnm",
- /* 250 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
- /* 251 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
- /* 252 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
- /* 253 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
- /* 254 */ "plus_num ::= PLUS INTEGER|FLOAT",
- /* 255 */ "minus_num ::= MINUS INTEGER|FLOAT",
- /* 256 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
- /* 257 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
- /* 258 */ "trigger_time ::= BEFORE|AFTER",
- /* 259 */ "trigger_time ::= INSTEAD OF",
- /* 260 */ "trigger_time ::=",
- /* 261 */ "trigger_event ::= DELETE|INSERT",
- /* 262 */ "trigger_event ::= UPDATE",
- /* 263 */ "trigger_event ::= UPDATE OF idlist",
- /* 264 */ "when_clause ::=",
- /* 265 */ "when_clause ::= WHEN expr",
- /* 266 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
- /* 267 */ "trigger_cmd_list ::= trigger_cmd SEMI",
- /* 268 */ "trnm ::= nm DOT nm",
- /* 269 */ "tridxby ::= INDEXED BY nm",
- /* 270 */ "tridxby ::= NOT INDEXED",
- /* 271 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
- /* 272 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
- /* 273 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
- /* 274 */ "trigger_cmd ::= scanpt select scanpt",
- /* 275 */ "expr ::= RAISE LP IGNORE RP",
- /* 276 */ "expr ::= RAISE LP raisetype COMMA nm RP",
- /* 277 */ "raisetype ::= ROLLBACK",
- /* 278 */ "raisetype ::= ABORT",
- /* 279 */ "raisetype ::= FAIL",
- /* 280 */ "cmd ::= DROP TRIGGER ifexists fullname",
- /* 281 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
- /* 282 */ "cmd ::= DETACH database_kw_opt expr",
- /* 283 */ "key_opt ::=",
- /* 284 */ "key_opt ::= KEY expr",
- /* 285 */ "cmd ::= REINDEX",
- /* 286 */ "cmd ::= REINDEX nm dbnm",
- /* 287 */ "cmd ::= ANALYZE",
- /* 288 */ "cmd ::= ANALYZE nm dbnm",
- /* 289 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
- /* 290 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
- /* 291 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm",
- /* 292 */ "add_column_fullname ::= fullname",
- /* 293 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
- /* 294 */ "cmd ::= create_vtab",
- /* 295 */ "cmd ::= create_vtab LP vtabarglist RP",
- /* 296 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
- /* 297 */ "vtabarg ::=",
- /* 298 */ "vtabargtoken ::= ANY",
- /* 299 */ "vtabargtoken ::= lp anylist RP",
- /* 300 */ "lp ::= LP",
- /* 301 */ "with ::= WITH wqlist",
- /* 302 */ "with ::= WITH RECURSIVE wqlist",
- /* 303 */ "wqas ::= AS",
- /* 304 */ "wqas ::= AS MATERIALIZED",
- /* 305 */ "wqas ::= AS NOT MATERIALIZED",
- /* 306 */ "wqitem ::= nm eidlist_opt wqas LP select RP",
- /* 307 */ "wqlist ::= wqitem",
- /* 308 */ "wqlist ::= wqlist COMMA wqitem",
- /* 309 */ "windowdefn_list ::= windowdefn",
- /* 310 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
- /* 311 */ "windowdefn ::= nm AS LP window RP",
- /* 312 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
- /* 313 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
- /* 314 */ "window ::= ORDER BY sortlist frame_opt",
- /* 315 */ "window ::= nm ORDER BY sortlist frame_opt",
- /* 316 */ "window ::= frame_opt",
+ /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP",
+ /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP",
+ /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over",
+ /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over",
+ /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over",
+ /* 193 */ "term ::= CTIME_KW",
+ /* 194 */ "expr ::= LP nexprlist COMMA expr RP",
+ /* 195 */ "expr ::= expr AND expr",
+ /* 196 */ "expr ::= expr OR expr",
+ /* 197 */ "expr ::= expr LT|GT|GE|LE expr",
+ /* 198 */ "expr ::= expr EQ|NE expr",
+ /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
+ /* 200 */ "expr ::= expr PLUS|MINUS expr",
+ /* 201 */ "expr ::= expr STAR|SLASH|REM expr",
+ /* 202 */ "expr ::= expr CONCAT expr",
+ /* 203 */ "likeop ::= NOT LIKE_KW|MATCH",
+ /* 204 */ "expr ::= expr likeop expr",
+ /* 205 */ "expr ::= expr likeop expr ESCAPE expr",
+ /* 206 */ "expr ::= expr ISNULL|NOTNULL",
+ /* 207 */ "expr ::= expr NOT NULL",
+ /* 208 */ "expr ::= expr IS expr",
+ /* 209 */ "expr ::= expr IS NOT expr",
+ /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr",
+ /* 211 */ "expr ::= expr IS DISTINCT FROM expr",
+ /* 212 */ "expr ::= NOT expr",
+ /* 213 */ "expr ::= BITNOT expr",
+ /* 214 */ "expr ::= PLUS|MINUS expr",
+ /* 215 */ "expr ::= expr PTR expr",
+ /* 216 */ "between_op ::= BETWEEN",
+ /* 217 */ "between_op ::= NOT BETWEEN",
+ /* 218 */ "expr ::= expr between_op expr AND expr",
+ /* 219 */ "in_op ::= IN",
+ /* 220 */ "in_op ::= NOT IN",
+ /* 221 */ "expr ::= expr in_op LP exprlist RP",
+ /* 222 */ "expr ::= LP select RP",
+ /* 223 */ "expr ::= expr in_op LP select RP",
+ /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist",
+ /* 225 */ "expr ::= EXISTS LP select RP",
+ /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 228 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 229 */ "case_else ::= ELSE expr",
+ /* 230 */ "case_else ::=",
+ /* 231 */ "case_operand ::=",
+ /* 232 */ "exprlist ::=",
+ /* 233 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 234 */ "nexprlist ::= expr",
+ /* 235 */ "paren_exprlist ::=",
+ /* 236 */ "paren_exprlist ::= LP exprlist RP",
+ /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
+ /* 238 */ "uniqueflag ::= UNIQUE",
+ /* 239 */ "uniqueflag ::=",
+ /* 240 */ "eidlist_opt ::=",
+ /* 241 */ "eidlist_opt ::= LP eidlist RP",
+ /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder",
+ /* 243 */ "eidlist ::= nm collate sortorder",
+ /* 244 */ "collate ::=",
+ /* 245 */ "collate ::= COLLATE ID|STRING",
+ /* 246 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 247 */ "cmd ::= VACUUM vinto",
+ /* 248 */ "cmd ::= VACUUM nm vinto",
+ /* 249 */ "vinto ::= INTO expr",
+ /* 250 */ "vinto ::=",
+ /* 251 */ "cmd ::= PRAGMA nm dbnm",
+ /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT",
+ /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT",
+ /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 260 */ "trigger_time ::= BEFORE|AFTER",
+ /* 261 */ "trigger_time ::= INSTEAD OF",
+ /* 262 */ "trigger_time ::=",
+ /* 263 */ "trigger_event ::= DELETE|INSERT",
+ /* 264 */ "trigger_event ::= UPDATE",
+ /* 265 */ "trigger_event ::= UPDATE OF idlist",
+ /* 266 */ "when_clause ::=",
+ /* 267 */ "when_clause ::= WHEN expr",
+ /* 268 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 270 */ "trnm ::= nm DOT nm",
+ /* 271 */ "tridxby ::= INDEXED BY nm",
+ /* 272 */ "tridxby ::= NOT INDEXED",
+ /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
+ /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
+ /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
+ /* 276 */ "trigger_cmd ::= scanpt select scanpt",
+ /* 277 */ "expr ::= RAISE LP IGNORE RP",
+ /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 279 */ "raisetype ::= ROLLBACK",
+ /* 280 */ "raisetype ::= ABORT",
+ /* 281 */ "raisetype ::= FAIL",
+ /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 284 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 285 */ "key_opt ::=",
+ /* 286 */ "key_opt ::= KEY expr",
+ /* 287 */ "cmd ::= REINDEX",
+ /* 288 */ "cmd ::= REINDEX nm dbnm",
+ /* 289 */ "cmd ::= ANALYZE",
+ /* 290 */ "cmd ::= ANALYZE nm dbnm",
+ /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
+ /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm",
+ /* 294 */ "add_column_fullname ::= fullname",
+ /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
+ /* 296 */ "cmd ::= create_vtab",
+ /* 297 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 299 */ "vtabarg ::=",
+ /* 300 */ "vtabargtoken ::= ANY",
+ /* 301 */ "vtabargtoken ::= lp anylist RP",
+ /* 302 */ "lp ::= LP",
+ /* 303 */ "with ::= WITH wqlist",
+ /* 304 */ "with ::= WITH RECURSIVE wqlist",
+ /* 305 */ "wqas ::= AS",
+ /* 306 */ "wqas ::= AS MATERIALIZED",
+ /* 307 */ "wqas ::= AS NOT MATERIALIZED",
+ /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP",
+ /* 309 */ "wqlist ::= wqitem",
+ /* 310 */ "wqlist ::= wqlist COMMA wqitem",
+ /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
+ /* 312 */ "windowdefn ::= nm AS LP window RP",
+ /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 315 */ "window ::= ORDER BY sortlist frame_opt",
+ /* 316 */ "window ::= nm ORDER BY sortlist frame_opt",
/* 317 */ "window ::= nm frame_opt",
/* 318 */ "frame_opt ::=",
/* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt",
@@ -169940,6 +173131,8 @@ static const char *const yyRuleName[] = {
/* 400 */ "anylist ::= anylist LP anylist RP",
/* 401 */ "anylist ::= anylist ANY",
/* 402 */ "with ::=",
+ /* 403 */ "windowdefn_list ::= windowdefn",
+ /* 404 */ "window ::= frame_opt",
};
#endif /* NDEBUG */
@@ -170634,135 +173827,135 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
217, /* (185) expr ::= expr COLLATE ID|STRING */
217, /* (186) expr ::= CAST LP expr AS typetoken RP */
217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
- 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
- 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
- 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
- 216, /* (191) term ::= CTIME_KW */
- 217, /* (192) expr ::= LP nexprlist COMMA expr RP */
- 217, /* (193) expr ::= expr AND expr */
- 217, /* (194) expr ::= expr OR expr */
- 217, /* (195) expr ::= expr LT|GT|GE|LE expr */
- 217, /* (196) expr ::= expr EQ|NE expr */
- 217, /* (197) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
- 217, /* (198) expr ::= expr PLUS|MINUS expr */
- 217, /* (199) expr ::= expr STAR|SLASH|REM expr */
- 217, /* (200) expr ::= expr CONCAT expr */
- 274, /* (201) likeop ::= NOT LIKE_KW|MATCH */
- 217, /* (202) expr ::= expr likeop expr */
- 217, /* (203) expr ::= expr likeop expr ESCAPE expr */
- 217, /* (204) expr ::= expr ISNULL|NOTNULL */
- 217, /* (205) expr ::= expr NOT NULL */
- 217, /* (206) expr ::= expr IS expr */
- 217, /* (207) expr ::= expr IS NOT expr */
- 217, /* (208) expr ::= expr IS NOT DISTINCT FROM expr */
- 217, /* (209) expr ::= expr IS DISTINCT FROM expr */
- 217, /* (210) expr ::= NOT expr */
- 217, /* (211) expr ::= BITNOT expr */
- 217, /* (212) expr ::= PLUS|MINUS expr */
- 217, /* (213) expr ::= expr PTR expr */
- 275, /* (214) between_op ::= BETWEEN */
- 275, /* (215) between_op ::= NOT BETWEEN */
- 217, /* (216) expr ::= expr between_op expr AND expr */
- 276, /* (217) in_op ::= IN */
- 276, /* (218) in_op ::= NOT IN */
- 217, /* (219) expr ::= expr in_op LP exprlist RP */
- 217, /* (220) expr ::= LP select RP */
- 217, /* (221) expr ::= expr in_op LP select RP */
- 217, /* (222) expr ::= expr in_op nm dbnm paren_exprlist */
- 217, /* (223) expr ::= EXISTS LP select RP */
- 217, /* (224) expr ::= CASE case_operand case_exprlist case_else END */
- 279, /* (225) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- 279, /* (226) case_exprlist ::= WHEN expr THEN expr */
- 280, /* (227) case_else ::= ELSE expr */
- 280, /* (228) case_else ::= */
- 278, /* (229) case_operand ::= */
- 261, /* (230) exprlist ::= */
- 253, /* (231) nexprlist ::= nexprlist COMMA expr */
- 253, /* (232) nexprlist ::= expr */
- 277, /* (233) paren_exprlist ::= */
- 277, /* (234) paren_exprlist ::= LP exprlist RP */
- 190, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- 281, /* (236) uniqueflag ::= UNIQUE */
- 281, /* (237) uniqueflag ::= */
- 221, /* (238) eidlist_opt ::= */
- 221, /* (239) eidlist_opt ::= LP eidlist RP */
- 232, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */
- 232, /* (241) eidlist ::= nm collate sortorder */
- 282, /* (242) collate ::= */
- 282, /* (243) collate ::= COLLATE ID|STRING */
- 190, /* (244) cmd ::= DROP INDEX ifexists fullname */
- 190, /* (245) cmd ::= VACUUM vinto */
- 190, /* (246) cmd ::= VACUUM nm vinto */
- 283, /* (247) vinto ::= INTO expr */
- 283, /* (248) vinto ::= */
- 190, /* (249) cmd ::= PRAGMA nm dbnm */
- 190, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */
- 190, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- 190, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */
- 190, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- 211, /* (254) plus_num ::= PLUS INTEGER|FLOAT */
- 212, /* (255) minus_num ::= MINUS INTEGER|FLOAT */
- 190, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- 285, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- 287, /* (258) trigger_time ::= BEFORE|AFTER */
- 287, /* (259) trigger_time ::= INSTEAD OF */
- 287, /* (260) trigger_time ::= */
- 288, /* (261) trigger_event ::= DELETE|INSERT */
- 288, /* (262) trigger_event ::= UPDATE */
- 288, /* (263) trigger_event ::= UPDATE OF idlist */
- 290, /* (264) when_clause ::= */
- 290, /* (265) when_clause ::= WHEN expr */
- 286, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- 286, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */
- 292, /* (268) trnm ::= nm DOT nm */
- 293, /* (269) tridxby ::= INDEXED BY nm */
- 293, /* (270) tridxby ::= NOT INDEXED */
- 291, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- 291, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- 291, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- 291, /* (274) trigger_cmd ::= scanpt select scanpt */
- 217, /* (275) expr ::= RAISE LP IGNORE RP */
- 217, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */
- 236, /* (277) raisetype ::= ROLLBACK */
- 236, /* (278) raisetype ::= ABORT */
- 236, /* (279) raisetype ::= FAIL */
- 190, /* (280) cmd ::= DROP TRIGGER ifexists fullname */
- 190, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- 190, /* (282) cmd ::= DETACH database_kw_opt expr */
- 295, /* (283) key_opt ::= */
- 295, /* (284) key_opt ::= KEY expr */
- 190, /* (285) cmd ::= REINDEX */
- 190, /* (286) cmd ::= REINDEX nm dbnm */
- 190, /* (287) cmd ::= ANALYZE */
- 190, /* (288) cmd ::= ANALYZE nm dbnm */
- 190, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */
- 190, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- 190, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- 296, /* (292) add_column_fullname ::= fullname */
- 190, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- 190, /* (294) cmd ::= create_vtab */
- 190, /* (295) cmd ::= create_vtab LP vtabarglist RP */
- 298, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 300, /* (297) vtabarg ::= */
- 301, /* (298) vtabargtoken ::= ANY */
- 301, /* (299) vtabargtoken ::= lp anylist RP */
- 302, /* (300) lp ::= LP */
- 266, /* (301) with ::= WITH wqlist */
- 266, /* (302) with ::= WITH RECURSIVE wqlist */
- 305, /* (303) wqas ::= AS */
- 305, /* (304) wqas ::= AS MATERIALIZED */
- 305, /* (305) wqas ::= AS NOT MATERIALIZED */
- 304, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */
- 241, /* (307) wqlist ::= wqitem */
- 241, /* (308) wqlist ::= wqlist COMMA wqitem */
- 306, /* (309) windowdefn_list ::= windowdefn */
- 306, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- 307, /* (311) windowdefn ::= nm AS LP window RP */
- 308, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (314) window ::= ORDER BY sortlist frame_opt */
- 308, /* (315) window ::= nm ORDER BY sortlist frame_opt */
- 308, /* (316) window ::= frame_opt */
+ 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+ 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+ 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ 216, /* (193) term ::= CTIME_KW */
+ 217, /* (194) expr ::= LP nexprlist COMMA expr RP */
+ 217, /* (195) expr ::= expr AND expr */
+ 217, /* (196) expr ::= expr OR expr */
+ 217, /* (197) expr ::= expr LT|GT|GE|LE expr */
+ 217, /* (198) expr ::= expr EQ|NE expr */
+ 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ 217, /* (200) expr ::= expr PLUS|MINUS expr */
+ 217, /* (201) expr ::= expr STAR|SLASH|REM expr */
+ 217, /* (202) expr ::= expr CONCAT expr */
+ 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */
+ 217, /* (204) expr ::= expr likeop expr */
+ 217, /* (205) expr ::= expr likeop expr ESCAPE expr */
+ 217, /* (206) expr ::= expr ISNULL|NOTNULL */
+ 217, /* (207) expr ::= expr NOT NULL */
+ 217, /* (208) expr ::= expr IS expr */
+ 217, /* (209) expr ::= expr IS NOT expr */
+ 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */
+ 217, /* (211) expr ::= expr IS DISTINCT FROM expr */
+ 217, /* (212) expr ::= NOT expr */
+ 217, /* (213) expr ::= BITNOT expr */
+ 217, /* (214) expr ::= PLUS|MINUS expr */
+ 217, /* (215) expr ::= expr PTR expr */
+ 275, /* (216) between_op ::= BETWEEN */
+ 275, /* (217) between_op ::= NOT BETWEEN */
+ 217, /* (218) expr ::= expr between_op expr AND expr */
+ 276, /* (219) in_op ::= IN */
+ 276, /* (220) in_op ::= NOT IN */
+ 217, /* (221) expr ::= expr in_op LP exprlist RP */
+ 217, /* (222) expr ::= LP select RP */
+ 217, /* (223) expr ::= expr in_op LP select RP */
+ 217, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */
+ 217, /* (225) expr ::= EXISTS LP select RP */
+ 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */
+ 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ 279, /* (228) case_exprlist ::= WHEN expr THEN expr */
+ 280, /* (229) case_else ::= ELSE expr */
+ 280, /* (230) case_else ::= */
+ 278, /* (231) case_operand ::= */
+ 261, /* (232) exprlist ::= */
+ 253, /* (233) nexprlist ::= nexprlist COMMA expr */
+ 253, /* (234) nexprlist ::= expr */
+ 277, /* (235) paren_exprlist ::= */
+ 277, /* (236) paren_exprlist ::= LP exprlist RP */
+ 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ 281, /* (238) uniqueflag ::= UNIQUE */
+ 281, /* (239) uniqueflag ::= */
+ 221, /* (240) eidlist_opt ::= */
+ 221, /* (241) eidlist_opt ::= LP eidlist RP */
+ 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
+ 232, /* (243) eidlist ::= nm collate sortorder */
+ 282, /* (244) collate ::= */
+ 282, /* (245) collate ::= COLLATE ID|STRING */
+ 190, /* (246) cmd ::= DROP INDEX ifexists fullname */
+ 190, /* (247) cmd ::= VACUUM vinto */
+ 190, /* (248) cmd ::= VACUUM nm vinto */
+ 283, /* (249) vinto ::= INTO expr */
+ 283, /* (250) vinto ::= */
+ 190, /* (251) cmd ::= PRAGMA nm dbnm */
+ 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
+ 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
+ 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ 287, /* (260) trigger_time ::= BEFORE|AFTER */
+ 287, /* (261) trigger_time ::= INSTEAD OF */
+ 287, /* (262) trigger_time ::= */
+ 288, /* (263) trigger_event ::= DELETE|INSERT */
+ 288, /* (264) trigger_event ::= UPDATE */
+ 288, /* (265) trigger_event ::= UPDATE OF idlist */
+ 290, /* (266) when_clause ::= */
+ 290, /* (267) when_clause ::= WHEN expr */
+ 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
+ 292, /* (270) trnm ::= nm DOT nm */
+ 293, /* (271) tridxby ::= INDEXED BY nm */
+ 293, /* (272) tridxby ::= NOT INDEXED */
+ 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ 291, /* (276) trigger_cmd ::= scanpt select scanpt */
+ 217, /* (277) expr ::= RAISE LP IGNORE RP */
+ 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
+ 236, /* (279) raisetype ::= ROLLBACK */
+ 236, /* (280) raisetype ::= ABORT */
+ 236, /* (281) raisetype ::= FAIL */
+ 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
+ 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ 190, /* (284) cmd ::= DETACH database_kw_opt expr */
+ 295, /* (285) key_opt ::= */
+ 295, /* (286) key_opt ::= KEY expr */
+ 190, /* (287) cmd ::= REINDEX */
+ 190, /* (288) cmd ::= REINDEX nm dbnm */
+ 190, /* (289) cmd ::= ANALYZE */
+ 190, /* (290) cmd ::= ANALYZE nm dbnm */
+ 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ 296, /* (294) add_column_fullname ::= fullname */
+ 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ 190, /* (296) cmd ::= create_vtab */
+ 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */
+ 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 300, /* (299) vtabarg ::= */
+ 301, /* (300) vtabargtoken ::= ANY */
+ 301, /* (301) vtabargtoken ::= lp anylist RP */
+ 302, /* (302) lp ::= LP */
+ 266, /* (303) with ::= WITH wqlist */
+ 266, /* (304) with ::= WITH RECURSIVE wqlist */
+ 305, /* (305) wqas ::= AS */
+ 305, /* (306) wqas ::= AS MATERIALIZED */
+ 305, /* (307) wqas ::= AS NOT MATERIALIZED */
+ 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
+ 241, /* (309) wqlist ::= wqitem */
+ 241, /* (310) wqlist ::= wqlist COMMA wqitem */
+ 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ 307, /* (312) windowdefn ::= nm AS LP window RP */
+ 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ 308, /* (315) window ::= ORDER BY sortlist frame_opt */
+ 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */
308, /* (317) window ::= nm frame_opt */
309, /* (318) frame_opt ::= */
309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
@@ -170849,6 +174042,8 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
303, /* (400) anylist ::= anylist LP anylist RP */
303, /* (401) anylist ::= anylist ANY */
266, /* (402) with ::= */
+ 306, /* (403) windowdefn_list ::= windowdefn */
+ 308, /* (404) window ::= frame_opt */
};
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
@@ -171042,135 +174237,135 @@ static const signed char yyRuleInfoNRhs[] = {
-3, /* (185) expr ::= expr COLLATE ID|STRING */
-6, /* (186) expr ::= CAST LP expr AS typetoken RP */
-5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */
- -4, /* (188) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
- -6, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
- -5, /* (190) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
- -1, /* (191) term ::= CTIME_KW */
- -5, /* (192) expr ::= LP nexprlist COMMA expr RP */
- -3, /* (193) expr ::= expr AND expr */
- -3, /* (194) expr ::= expr OR expr */
- -3, /* (195) expr ::= expr LT|GT|GE|LE expr */
- -3, /* (196) expr ::= expr EQ|NE expr */
- -3, /* (197) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
- -3, /* (198) expr ::= expr PLUS|MINUS expr */
- -3, /* (199) expr ::= expr STAR|SLASH|REM expr */
- -3, /* (200) expr ::= expr CONCAT expr */
- -2, /* (201) likeop ::= NOT LIKE_KW|MATCH */
- -3, /* (202) expr ::= expr likeop expr */
- -5, /* (203) expr ::= expr likeop expr ESCAPE expr */
- -2, /* (204) expr ::= expr ISNULL|NOTNULL */
- -3, /* (205) expr ::= expr NOT NULL */
- -3, /* (206) expr ::= expr IS expr */
- -4, /* (207) expr ::= expr IS NOT expr */
- -6, /* (208) expr ::= expr IS NOT DISTINCT FROM expr */
- -5, /* (209) expr ::= expr IS DISTINCT FROM expr */
- -2, /* (210) expr ::= NOT expr */
- -2, /* (211) expr ::= BITNOT expr */
- -2, /* (212) expr ::= PLUS|MINUS expr */
- -3, /* (213) expr ::= expr PTR expr */
- -1, /* (214) between_op ::= BETWEEN */
- -2, /* (215) between_op ::= NOT BETWEEN */
- -5, /* (216) expr ::= expr between_op expr AND expr */
- -1, /* (217) in_op ::= IN */
- -2, /* (218) in_op ::= NOT IN */
- -5, /* (219) expr ::= expr in_op LP exprlist RP */
- -3, /* (220) expr ::= LP select RP */
- -5, /* (221) expr ::= expr in_op LP select RP */
- -5, /* (222) expr ::= expr in_op nm dbnm paren_exprlist */
- -4, /* (223) expr ::= EXISTS LP select RP */
- -5, /* (224) expr ::= CASE case_operand case_exprlist case_else END */
- -5, /* (225) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- -4, /* (226) case_exprlist ::= WHEN expr THEN expr */
- -2, /* (227) case_else ::= ELSE expr */
- 0, /* (228) case_else ::= */
- 0, /* (229) case_operand ::= */
- 0, /* (230) exprlist ::= */
- -3, /* (231) nexprlist ::= nexprlist COMMA expr */
- -1, /* (232) nexprlist ::= expr */
- 0, /* (233) paren_exprlist ::= */
- -3, /* (234) paren_exprlist ::= LP exprlist RP */
- -12, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- -1, /* (236) uniqueflag ::= UNIQUE */
- 0, /* (237) uniqueflag ::= */
- 0, /* (238) eidlist_opt ::= */
- -3, /* (239) eidlist_opt ::= LP eidlist RP */
- -5, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */
- -3, /* (241) eidlist ::= nm collate sortorder */
- 0, /* (242) collate ::= */
- -2, /* (243) collate ::= COLLATE ID|STRING */
- -4, /* (244) cmd ::= DROP INDEX ifexists fullname */
- -2, /* (245) cmd ::= VACUUM vinto */
- -3, /* (246) cmd ::= VACUUM nm vinto */
- -2, /* (247) vinto ::= INTO expr */
- 0, /* (248) vinto ::= */
- -3, /* (249) cmd ::= PRAGMA nm dbnm */
- -5, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */
- -6, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- -5, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */
- -6, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- -2, /* (254) plus_num ::= PLUS INTEGER|FLOAT */
- -2, /* (255) minus_num ::= MINUS INTEGER|FLOAT */
- -5, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- -11, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- -1, /* (258) trigger_time ::= BEFORE|AFTER */
- -2, /* (259) trigger_time ::= INSTEAD OF */
- 0, /* (260) trigger_time ::= */
- -1, /* (261) trigger_event ::= DELETE|INSERT */
- -1, /* (262) trigger_event ::= UPDATE */
- -3, /* (263) trigger_event ::= UPDATE OF idlist */
- 0, /* (264) when_clause ::= */
- -2, /* (265) when_clause ::= WHEN expr */
- -3, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- -2, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */
- -3, /* (268) trnm ::= nm DOT nm */
- -3, /* (269) tridxby ::= INDEXED BY nm */
- -2, /* (270) tridxby ::= NOT INDEXED */
- -9, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- -8, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- -6, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- -3, /* (274) trigger_cmd ::= scanpt select scanpt */
- -4, /* (275) expr ::= RAISE LP IGNORE RP */
- -6, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */
- -1, /* (277) raisetype ::= ROLLBACK */
- -1, /* (278) raisetype ::= ABORT */
- -1, /* (279) raisetype ::= FAIL */
- -4, /* (280) cmd ::= DROP TRIGGER ifexists fullname */
- -6, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- -3, /* (282) cmd ::= DETACH database_kw_opt expr */
- 0, /* (283) key_opt ::= */
- -2, /* (284) key_opt ::= KEY expr */
- -1, /* (285) cmd ::= REINDEX */
- -3, /* (286) cmd ::= REINDEX nm dbnm */
- -1, /* (287) cmd ::= ANALYZE */
- -3, /* (288) cmd ::= ANALYZE nm dbnm */
- -6, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */
- -7, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- -6, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- -1, /* (292) add_column_fullname ::= fullname */
- -8, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- -1, /* (294) cmd ::= create_vtab */
- -4, /* (295) cmd ::= create_vtab LP vtabarglist RP */
- -8, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 0, /* (297) vtabarg ::= */
- -1, /* (298) vtabargtoken ::= ANY */
- -3, /* (299) vtabargtoken ::= lp anylist RP */
- -1, /* (300) lp ::= LP */
- -2, /* (301) with ::= WITH wqlist */
- -3, /* (302) with ::= WITH RECURSIVE wqlist */
- -1, /* (303) wqas ::= AS */
- -2, /* (304) wqas ::= AS MATERIALIZED */
- -3, /* (305) wqas ::= AS NOT MATERIALIZED */
- -6, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */
- -1, /* (307) wqlist ::= wqitem */
- -3, /* (308) wqlist ::= wqlist COMMA wqitem */
- -1, /* (309) windowdefn_list ::= windowdefn */
- -3, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- -5, /* (311) windowdefn ::= nm AS LP window RP */
- -5, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- -6, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- -4, /* (314) window ::= ORDER BY sortlist frame_opt */
- -5, /* (315) window ::= nm ORDER BY sortlist frame_opt */
- -1, /* (316) window ::= frame_opt */
+ -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+ -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+ -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ -1, /* (193) term ::= CTIME_KW */
+ -5, /* (194) expr ::= LP nexprlist COMMA expr RP */
+ -3, /* (195) expr ::= expr AND expr */
+ -3, /* (196) expr ::= expr OR expr */
+ -3, /* (197) expr ::= expr LT|GT|GE|LE expr */
+ -3, /* (198) expr ::= expr EQ|NE expr */
+ -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ -3, /* (200) expr ::= expr PLUS|MINUS expr */
+ -3, /* (201) expr ::= expr STAR|SLASH|REM expr */
+ -3, /* (202) expr ::= expr CONCAT expr */
+ -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */
+ -3, /* (204) expr ::= expr likeop expr */
+ -5, /* (205) expr ::= expr likeop expr ESCAPE expr */
+ -2, /* (206) expr ::= expr ISNULL|NOTNULL */
+ -3, /* (207) expr ::= expr NOT NULL */
+ -3, /* (208) expr ::= expr IS expr */
+ -4, /* (209) expr ::= expr IS NOT expr */
+ -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */
+ -5, /* (211) expr ::= expr IS DISTINCT FROM expr */
+ -2, /* (212) expr ::= NOT expr */
+ -2, /* (213) expr ::= BITNOT expr */
+ -2, /* (214) expr ::= PLUS|MINUS expr */
+ -3, /* (215) expr ::= expr PTR expr */
+ -1, /* (216) between_op ::= BETWEEN */
+ -2, /* (217) between_op ::= NOT BETWEEN */
+ -5, /* (218) expr ::= expr between_op expr AND expr */
+ -1, /* (219) in_op ::= IN */
+ -2, /* (220) in_op ::= NOT IN */
+ -5, /* (221) expr ::= expr in_op LP exprlist RP */
+ -3, /* (222) expr ::= LP select RP */
+ -5, /* (223) expr ::= expr in_op LP select RP */
+ -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */
+ -4, /* (225) expr ::= EXISTS LP select RP */
+ -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */
+ -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ -4, /* (228) case_exprlist ::= WHEN expr THEN expr */
+ -2, /* (229) case_else ::= ELSE expr */
+ 0, /* (230) case_else ::= */
+ 0, /* (231) case_operand ::= */
+ 0, /* (232) exprlist ::= */
+ -3, /* (233) nexprlist ::= nexprlist COMMA expr */
+ -1, /* (234) nexprlist ::= expr */
+ 0, /* (235) paren_exprlist ::= */
+ -3, /* (236) paren_exprlist ::= LP exprlist RP */
+ -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ -1, /* (238) uniqueflag ::= UNIQUE */
+ 0, /* (239) uniqueflag ::= */
+ 0, /* (240) eidlist_opt ::= */
+ -3, /* (241) eidlist_opt ::= LP eidlist RP */
+ -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
+ -3, /* (243) eidlist ::= nm collate sortorder */
+ 0, /* (244) collate ::= */
+ -2, /* (245) collate ::= COLLATE ID|STRING */
+ -4, /* (246) cmd ::= DROP INDEX ifexists fullname */
+ -2, /* (247) cmd ::= VACUUM vinto */
+ -3, /* (248) cmd ::= VACUUM nm vinto */
+ -2, /* (249) vinto ::= INTO expr */
+ 0, /* (250) vinto ::= */
+ -3, /* (251) cmd ::= PRAGMA nm dbnm */
+ -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
+ -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
+ -5, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ -1, /* (260) trigger_time ::= BEFORE|AFTER */
+ -2, /* (261) trigger_time ::= INSTEAD OF */
+ 0, /* (262) trigger_time ::= */
+ -1, /* (263) trigger_event ::= DELETE|INSERT */
+ -1, /* (264) trigger_event ::= UPDATE */
+ -3, /* (265) trigger_event ::= UPDATE OF idlist */
+ 0, /* (266) when_clause ::= */
+ -2, /* (267) when_clause ::= WHEN expr */
+ -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
+ -3, /* (270) trnm ::= nm DOT nm */
+ -3, /* (271) tridxby ::= INDEXED BY nm */
+ -2, /* (272) tridxby ::= NOT INDEXED */
+ -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ -3, /* (276) trigger_cmd ::= scanpt select scanpt */
+ -4, /* (277) expr ::= RAISE LP IGNORE RP */
+ -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
+ -1, /* (279) raisetype ::= ROLLBACK */
+ -1, /* (280) raisetype ::= ABORT */
+ -1, /* (281) raisetype ::= FAIL */
+ -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
+ -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ -3, /* (284) cmd ::= DETACH database_kw_opt expr */
+ 0, /* (285) key_opt ::= */
+ -2, /* (286) key_opt ::= KEY expr */
+ -1, /* (287) cmd ::= REINDEX */
+ -3, /* (288) cmd ::= REINDEX nm dbnm */
+ -1, /* (289) cmd ::= ANALYZE */
+ -3, /* (290) cmd ::= ANALYZE nm dbnm */
+ -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ -1, /* (294) add_column_fullname ::= fullname */
+ -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ -1, /* (296) cmd ::= create_vtab */
+ -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */
+ -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 0, /* (299) vtabarg ::= */
+ -1, /* (300) vtabargtoken ::= ANY */
+ -3, /* (301) vtabargtoken ::= lp anylist RP */
+ -1, /* (302) lp ::= LP */
+ -2, /* (303) with ::= WITH wqlist */
+ -3, /* (304) with ::= WITH RECURSIVE wqlist */
+ -1, /* (305) wqas ::= AS */
+ -2, /* (306) wqas ::= AS MATERIALIZED */
+ -3, /* (307) wqas ::= AS NOT MATERIALIZED */
+ -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
+ -1, /* (309) wqlist ::= wqitem */
+ -3, /* (310) wqlist ::= wqlist COMMA wqitem */
+ -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ -5, /* (312) windowdefn ::= nm AS LP window RP */
+ -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ -4, /* (315) window ::= ORDER BY sortlist frame_opt */
+ -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */
-2, /* (317) window ::= nm frame_opt */
0, /* (318) frame_opt ::= */
-3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
@@ -171257,6 +174452,8 @@ static const signed char yyRuleInfoNRhs[] = {
-4, /* (400) anylist ::= anylist LP anylist RP */
-2, /* (401) anylist ::= anylist ANY */
0, /* (402) with ::= */
+ -1, /* (403) windowdefn_list ::= windowdefn */
+ -1, /* (404) window ::= frame_opt */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -171299,10 +174496,10 @@ static YYACTIONTYPE yy_reduce(
/********** Begin reduce actions **********************************************/
YYMINORTYPE yylhsminor;
case 0: /* explain ::= EXPLAIN */
-{ pParse->explain = 1; }
+{ if( pParse->pReprepare==0 ) pParse->explain = 1; }
break;
case 1: /* explain ::= EXPLAIN QUERY PLAN */
-{ pParse->explain = 2; }
+{ if( pParse->pReprepare==0 ) pParse->explain = 2; }
break;
case 2: /* cmdx ::= cmd */
{ sqlite3FinishCoding(pParse); }
@@ -171353,7 +174550,7 @@ static YYACTIONTYPE yy_reduce(
case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72);
case 81: /* ifexists ::= */ yytestcase(yyruleno==81);
case 98: /* distinct ::= */ yytestcase(yyruleno==98);
- case 242: /* collate ::= */ yytestcase(yyruleno==242);
+ case 244: /* collate ::= */ yytestcase(yyruleno==244);
{yymsp[1].minor.yy394 = 0;}
break;
case 16: /* ifnotexists ::= IF NOT EXISTS */
@@ -171537,9 +174734,9 @@ static YYACTIONTYPE yy_reduce(
break;
case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */
case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80);
- case 215: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==215);
- case 218: /* in_op ::= NOT IN */ yytestcase(yyruleno==218);
- case 243: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==243);
+ case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217);
+ case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220);
+ case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245);
{yymsp[-1].minor.yy394 = 1;}
break;
case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
@@ -171612,7 +174809,6 @@ static YYACTIONTYPE yy_reduce(
if( p ){
parserDoubleLinkSelect(pParse, p);
}
- yymsp[0].minor.yy47 = p; /*A-overwrites-X*/
}
break;
case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */
@@ -171689,9 +174885,9 @@ static YYACTIONTYPE yy_reduce(
case 99: /* sclp ::= */
case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132);
case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142);
- case 230: /* exprlist ::= */ yytestcase(yyruleno==230);
- case 233: /* paren_exprlist ::= */ yytestcase(yyruleno==233);
- case 238: /* eidlist_opt ::= */ yytestcase(yyruleno==238);
+ case 232: /* exprlist ::= */ yytestcase(yyruleno==232);
+ case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235);
+ case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240);
{yymsp[1].minor.yy322 = 0;}
break;
case 100: /* selcollist ::= sclp scanpt expr scanpt as */
@@ -171704,21 +174900,24 @@ static YYACTIONTYPE yy_reduce(
case 101: /* selcollist ::= sclp scanpt STAR */
{
Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0);
+ sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail));
yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p);
}
break;
case 102: /* selcollist ::= sclp scanpt nm DOT STAR */
{
- Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
- Expr *pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0);
- Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
+ Expr *pRight, *pLeft, *pDot;
+ pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
+ sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail));
+ pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0);
+ pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot);
}
break;
case 103: /* as ::= AS nm */
case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115);
- case 254: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==254);
- case 255: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==255);
+ case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256);
+ case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257);
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
break;
case 105: /* from ::= */
@@ -171890,16 +175089,16 @@ static YYACTIONTYPE yy_reduce(
case 146: /* limit_opt ::= */ yytestcase(yyruleno==146);
case 151: /* where_opt ::= */ yytestcase(yyruleno==151);
case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153);
- case 228: /* case_else ::= */ yytestcase(yyruleno==228);
- case 229: /* case_operand ::= */ yytestcase(yyruleno==229);
- case 248: /* vinto ::= */ yytestcase(yyruleno==248);
+ case 230: /* case_else ::= */ yytestcase(yyruleno==230);
+ case 231: /* case_operand ::= */ yytestcase(yyruleno==231);
+ case 250: /* vinto ::= */ yytestcase(yyruleno==250);
{yymsp[1].minor.yy528 = 0;}
break;
case 145: /* having_opt ::= HAVING expr */
case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152);
case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154);
- case 227: /* case_else ::= ELSE expr */ yytestcase(yyruleno==227);
- case 247: /* vinto ::= INTO expr */ yytestcase(yyruleno==247);
+ case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229);
+ case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249);
{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;}
break;
case 147: /* limit_opt ::= LIMIT expr */
@@ -172085,33 +175284,48 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-4].minor.yy528 = yylhsminor.yy528;
break;
- case 188: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
+ case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */
+{
+ yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394);
+ sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322);
+}
+ yymsp[-7].minor.yy528 = yylhsminor.yy528;
+ break;
+ case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0);
}
yymsp[-3].minor.yy528 = yylhsminor.yy528;
break;
- case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
+ case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394);
sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
}
yymsp[-5].minor.yy528 = yylhsminor.yy528;
break;
- case 190: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
+ case 191: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */
+{
+ yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394);
+ sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
+ sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-2].minor.yy322);
+}
+ yymsp[-8].minor.yy528 = yylhsminor.yy528;
+ break;
+ case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0);
sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41);
}
yymsp[-4].minor.yy528 = yylhsminor.yy528;
break;
- case 191: /* term ::= CTIME_KW */
+ case 193: /* term ::= CTIME_KW */
{
yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0);
}
yymsp[0].minor.yy528 = yylhsminor.yy528;
break;
- case 192: /* expr ::= LP nexprlist COMMA expr RP */
+ case 194: /* expr ::= LP nexprlist COMMA expr RP */
{
ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528);
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
@@ -172125,22 +175339,22 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 193: /* expr ::= expr AND expr */
+ case 195: /* expr ::= expr AND expr */
{yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
break;
- case 194: /* expr ::= expr OR expr */
- case 195: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==195);
- case 196: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==196);
- case 197: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==197);
- case 198: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==198);
- case 199: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==199);
- case 200: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==200);
+ case 196: /* expr ::= expr OR expr */
+ case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197);
+ case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198);
+ case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199);
+ case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200);
+ case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201);
+ case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202);
{yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);}
break;
- case 201: /* likeop ::= NOT LIKE_KW|MATCH */
+ case 203: /* likeop ::= NOT LIKE_KW|MATCH */
{yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/}
break;
- case 202: /* expr ::= expr likeop expr */
+ case 204: /* expr ::= expr likeop expr */
{
ExprList *pList;
int bNot = yymsp[-1].minor.yy0.n & 0x80000000;
@@ -172152,7 +175366,7 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc;
}
break;
- case 203: /* expr ::= expr likeop expr ESCAPE expr */
+ case 205: /* expr ::= expr likeop expr ESCAPE expr */
{
ExprList *pList;
int bNot = yymsp[-3].minor.yy0.n & 0x80000000;
@@ -172165,47 +175379,47 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc;
}
break;
- case 204: /* expr ::= expr ISNULL|NOTNULL */
+ case 206: /* expr ::= expr ISNULL|NOTNULL */
{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);}
break;
- case 205: /* expr ::= expr NOT NULL */
+ case 207: /* expr ::= expr NOT NULL */
{yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);}
break;
- case 206: /* expr ::= expr IS expr */
+ case 208: /* expr ::= expr IS expr */
{
yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL);
}
break;
- case 207: /* expr ::= expr IS NOT expr */
+ case 209: /* expr ::= expr IS NOT expr */
{
yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL);
}
break;
- case 208: /* expr ::= expr IS NOT DISTINCT FROM expr */
+ case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */
{
yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL);
}
break;
- case 209: /* expr ::= expr IS DISTINCT FROM expr */
+ case 211: /* expr ::= expr IS DISTINCT FROM expr */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL);
}
break;
- case 210: /* expr ::= NOT expr */
- case 211: /* expr ::= BITNOT expr */ yytestcase(yyruleno==211);
+ case 212: /* expr ::= NOT expr */
+ case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213);
{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/}
break;
- case 212: /* expr ::= PLUS|MINUS expr */
+ case 214: /* expr ::= PLUS|MINUS expr */
{
yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0);
/*A-overwrites-B*/
}
break;
- case 213: /* expr ::= expr PTR expr */
+ case 215: /* expr ::= expr PTR expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528);
pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528);
@@ -172213,11 +175427,11 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-2].minor.yy528 = yylhsminor.yy528;
break;
- case 214: /* between_op ::= BETWEEN */
- case 217: /* in_op ::= IN */ yytestcase(yyruleno==217);
+ case 216: /* between_op ::= BETWEEN */
+ case 219: /* in_op ::= IN */ yytestcase(yyruleno==219);
{yymsp[0].minor.yy394 = 0;}
break;
- case 216: /* expr ::= expr between_op expr AND expr */
+ case 218: /* expr ::= expr between_op expr AND expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528);
@@ -172230,7 +175444,7 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 219: /* expr ::= expr in_op LP exprlist RP */
+ case 221: /* expr ::= expr in_op LP exprlist RP */
{
if( yymsp[-1].minor.yy322==0 ){
/* Expressions of the form
@@ -172276,20 +175490,20 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 220: /* expr ::= LP select RP */
+ case 222: /* expr ::= LP select RP */
{
yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47);
}
break;
- case 221: /* expr ::= expr in_op LP select RP */
+ case 223: /* expr ::= expr in_op LP select RP */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47);
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 222: /* expr ::= expr in_op nm dbnm paren_exprlist */
+ case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */
{
SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
@@ -172299,14 +175513,14 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 223: /* expr ::= EXISTS LP select RP */
+ case 225: /* expr ::= EXISTS LP select RP */
{
Expr *p;
p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47);
}
break;
- case 224: /* expr ::= CASE case_operand case_exprlist case_else END */
+ case 226: /* expr ::= CASE case_operand case_exprlist case_else END */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0);
if( yymsp[-4].minor.yy528 ){
@@ -172318,29 +175532,29 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 225: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
{
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528);
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528);
}
break;
- case 226: /* case_exprlist ::= WHEN expr THEN expr */
+ case 228: /* case_exprlist ::= WHEN expr THEN expr */
{
yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528);
}
break;
- case 231: /* nexprlist ::= nexprlist COMMA expr */
+ case 233: /* nexprlist ::= nexprlist COMMA expr */
{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);}
break;
- case 232: /* nexprlist ::= expr */
+ case 234: /* nexprlist ::= expr */
{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/}
break;
- case 234: /* paren_exprlist ::= LP exprlist RP */
- case 239: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==239);
+ case 236: /* paren_exprlist ::= LP exprlist RP */
+ case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241);
{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;}
break;
- case 235: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
{
sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394,
@@ -172350,48 +175564,48 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 236: /* uniqueflag ::= UNIQUE */
- case 278: /* raisetype ::= ABORT */ yytestcase(yyruleno==278);
+ case 238: /* uniqueflag ::= UNIQUE */
+ case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280);
{yymsp[0].minor.yy394 = OE_Abort;}
break;
- case 237: /* uniqueflag ::= */
+ case 239: /* uniqueflag ::= */
{yymsp[1].minor.yy394 = OE_None;}
break;
- case 240: /* eidlist ::= eidlist COMMA nm collate sortorder */
+ case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */
{
yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394);
}
break;
- case 241: /* eidlist ::= nm collate sortorder */
+ case 243: /* eidlist ::= nm collate sortorder */
{
yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/
}
break;
- case 244: /* cmd ::= DROP INDEX ifexists fullname */
+ case 246: /* cmd ::= DROP INDEX ifexists fullname */
{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);}
break;
- case 245: /* cmd ::= VACUUM vinto */
+ case 247: /* cmd ::= VACUUM vinto */
{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);}
break;
- case 246: /* cmd ::= VACUUM nm vinto */
+ case 248: /* cmd ::= VACUUM nm vinto */
{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);}
break;
- case 249: /* cmd ::= PRAGMA nm dbnm */
+ case 251: /* cmd ::= PRAGMA nm dbnm */
{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
break;
- case 250: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+ case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
break;
- case 251: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
break;
- case 252: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+ case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
break;
- case 253: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
break;
- case 256: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
{
Token all;
all.z = yymsp[-3].minor.yy0.z;
@@ -172399,50 +175613,50 @@ static YYACTIONTYPE yy_reduce(
sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all);
}
break;
- case 257: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
{
sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394);
yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
}
break;
- case 258: /* trigger_time ::= BEFORE|AFTER */
+ case 260: /* trigger_time ::= BEFORE|AFTER */
{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ }
break;
- case 259: /* trigger_time ::= INSTEAD OF */
+ case 261: /* trigger_time ::= INSTEAD OF */
{ yymsp[-1].minor.yy394 = TK_INSTEAD;}
break;
- case 260: /* trigger_time ::= */
+ case 262: /* trigger_time ::= */
{ yymsp[1].minor.yy394 = TK_BEFORE; }
break;
- case 261: /* trigger_event ::= DELETE|INSERT */
- case 262: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==262);
+ case 263: /* trigger_event ::= DELETE|INSERT */
+ case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264);
{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;}
break;
- case 263: /* trigger_event ::= UPDATE OF idlist */
+ case 265: /* trigger_event ::= UPDATE OF idlist */
{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;}
break;
- case 264: /* when_clause ::= */
- case 283: /* key_opt ::= */ yytestcase(yyruleno==283);
+ case 266: /* when_clause ::= */
+ case 285: /* key_opt ::= */ yytestcase(yyruleno==285);
{ yymsp[1].minor.yy528 = 0; }
break;
- case 265: /* when_clause ::= WHEN expr */
- case 284: /* key_opt ::= KEY expr */ yytestcase(yyruleno==284);
+ case 267: /* when_clause ::= WHEN expr */
+ case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286);
{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; }
break;
- case 266: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
{
assert( yymsp[-2].minor.yy33!=0 );
yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33;
yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33;
}
break;
- case 267: /* trigger_cmd_list ::= trigger_cmd SEMI */
+ case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */
{
assert( yymsp[-1].minor.yy33!=0 );
yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33;
}
break;
- case 268: /* trnm ::= nm DOT nm */
+ case 270: /* trnm ::= nm DOT nm */
{
yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
sqlite3ErrorMsg(pParse,
@@ -172450,39 +175664,39 @@ static YYACTIONTYPE yy_reduce(
"statements within triggers");
}
break;
- case 269: /* tridxby ::= INDEXED BY nm */
+ case 271: /* tridxby ::= INDEXED BY nm */
{
sqlite3ErrorMsg(pParse,
"the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 270: /* tridxby ::= NOT INDEXED */
+ case 272: /* tridxby ::= NOT INDEXED */
{
sqlite3ErrorMsg(pParse,
"the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 271: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);}
yymsp[-8].minor.yy33 = yylhsminor.yy33;
break;
- case 272: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
{
yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/
}
yymsp[-7].minor.yy33 = yylhsminor.yy33;
break;
- case 273: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);}
yymsp[-5].minor.yy33 = yylhsminor.yy33;
break;
- case 274: /* trigger_cmd ::= scanpt select scanpt */
+ case 276: /* trigger_cmd ::= scanpt select scanpt */
{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/}
yymsp[-2].minor.yy33 = yylhsminor.yy33;
break;
- case 275: /* expr ::= RAISE LP IGNORE RP */
+ case 277: /* expr ::= RAISE LP IGNORE RP */
{
yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
if( yymsp[-3].minor.yy528 ){
@@ -172490,7 +175704,7 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 276: /* expr ::= RAISE LP raisetype COMMA nm RP */
+ case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
if( yymsp[-5].minor.yy528 ) {
@@ -172498,118 +175712,114 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 277: /* raisetype ::= ROLLBACK */
+ case 279: /* raisetype ::= ROLLBACK */
{yymsp[0].minor.yy394 = OE_Rollback;}
break;
- case 279: /* raisetype ::= FAIL */
+ case 281: /* raisetype ::= FAIL */
{yymsp[0].minor.yy394 = OE_Fail;}
break;
- case 280: /* cmd ::= DROP TRIGGER ifexists fullname */
+ case 282: /* cmd ::= DROP TRIGGER ifexists fullname */
{
sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394);
}
break;
- case 281: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
{
sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528);
}
break;
- case 282: /* cmd ::= DETACH database_kw_opt expr */
+ case 284: /* cmd ::= DETACH database_kw_opt expr */
{
sqlite3Detach(pParse, yymsp[0].minor.yy528);
}
break;
- case 285: /* cmd ::= REINDEX */
+ case 287: /* cmd ::= REINDEX */
{sqlite3Reindex(pParse, 0, 0);}
break;
- case 286: /* cmd ::= REINDEX nm dbnm */
+ case 288: /* cmd ::= REINDEX nm dbnm */
{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 287: /* cmd ::= ANALYZE */
+ case 289: /* cmd ::= ANALYZE */
{sqlite3Analyze(pParse, 0, 0);}
break;
- case 288: /* cmd ::= ANALYZE nm dbnm */
+ case 290: /* cmd ::= ANALYZE nm dbnm */
{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 289: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+ case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
{
sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0);
}
break;
- case 290: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
{
yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
}
break;
- case 291: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
{
sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0);
}
break;
- case 292: /* add_column_fullname ::= fullname */
+ case 294: /* add_column_fullname ::= fullname */
{
disableLookaside(pParse);
sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131);
}
break;
- case 293: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
{
sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 294: /* cmd ::= create_vtab */
+ case 296: /* cmd ::= create_vtab */
{sqlite3VtabFinishParse(pParse,0);}
break;
- case 295: /* cmd ::= create_vtab LP vtabarglist RP */
+ case 297: /* cmd ::= create_vtab LP vtabarglist RP */
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
break;
- case 296: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
{
sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394);
}
break;
- case 297: /* vtabarg ::= */
+ case 299: /* vtabarg ::= */
{sqlite3VtabArgInit(pParse);}
break;
- case 298: /* vtabargtoken ::= ANY */
- case 299: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==299);
- case 300: /* lp ::= LP */ yytestcase(yyruleno==300);
+ case 300: /* vtabargtoken ::= ANY */
+ case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301);
+ case 302: /* lp ::= LP */ yytestcase(yyruleno==302);
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
break;
- case 301: /* with ::= WITH wqlist */
- case 302: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==302);
+ case 303: /* with ::= WITH wqlist */
+ case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304);
{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); }
break;
- case 303: /* wqas ::= AS */
+ case 305: /* wqas ::= AS */
{yymsp[0].minor.yy516 = M10d_Any;}
break;
- case 304: /* wqas ::= AS MATERIALIZED */
+ case 306: /* wqas ::= AS MATERIALIZED */
{yymsp[-1].minor.yy516 = M10d_Yes;}
break;
- case 305: /* wqas ::= AS NOT MATERIALIZED */
+ case 307: /* wqas ::= AS NOT MATERIALIZED */
{yymsp[-2].minor.yy516 = M10d_No;}
break;
- case 306: /* wqitem ::= nm eidlist_opt wqas LP select RP */
+ case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */
{
yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/
}
break;
- case 307: /* wqlist ::= wqitem */
+ case 309: /* wqlist ::= wqitem */
{
yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/
}
break;
- case 308: /* wqlist ::= wqlist COMMA wqitem */
+ case 310: /* wqlist ::= wqlist COMMA wqitem */
{
yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385);
}
break;
- case 309: /* windowdefn_list ::= windowdefn */
-{ yylhsminor.yy41 = yymsp[0].minor.yy41; }
- yymsp[0].minor.yy41 = yylhsminor.yy41;
- break;
- case 310: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
{
assert( yymsp[0].minor.yy41!=0 );
sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41);
@@ -172618,7 +175828,7 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-2].minor.yy41 = yylhsminor.yy41;
break;
- case 311: /* windowdefn ::= nm AS LP window RP */
+ case 312: /* windowdefn ::= nm AS LP window RP */
{
if( ALWAYS(yymsp[-1].minor.yy41) ){
yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);
@@ -172627,35 +175837,28 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-4].minor.yy41 = yylhsminor.yy41;
break;
- case 312: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ case 313: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
{
yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0);
}
break;
- case 313: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0);
}
yymsp[-5].minor.yy41 = yylhsminor.yy41;
break;
- case 314: /* window ::= ORDER BY sortlist frame_opt */
+ case 315: /* window ::= ORDER BY sortlist frame_opt */
{
yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0);
}
break;
- case 315: /* window ::= nm ORDER BY sortlist frame_opt */
+ case 316: /* window ::= nm ORDER BY sortlist frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0);
}
yymsp[-4].minor.yy41 = yylhsminor.yy41;
break;
- case 316: /* window ::= frame_opt */
- case 335: /* filter_over ::= over_clause */ yytestcase(yyruleno==335);
-{
- yylhsminor.yy41 = yymsp[0].minor.yy41;
-}
- yymsp[0].minor.yy41 = yylhsminor.yy41;
- break;
case 317: /* window ::= nm frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0);
@@ -172721,6 +175924,12 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-1].minor.yy41 = yylhsminor.yy41;
break;
+ case 335: /* filter_over ::= over_clause */
+{
+ yylhsminor.yy41 = yymsp[0].minor.yy41;
+}
+ yymsp[0].minor.yy41 = yylhsminor.yy41;
+ break;
case 336: /* filter_over ::= filter_clause */
{
yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
@@ -172814,6 +176023,8 @@ static YYACTIONTYPE yy_reduce(
/* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400);
/* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401);
/* (402) with ::= */ yytestcase(yyruleno==402);
+ /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403);
+ /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404);
break;
/********** End reduce actions ************************************************/
};
@@ -173602,180 +176813,179 @@ static const unsigned char aKWCode[148] = {0,
static int keywordCode(const char *z, int n, int *pType){
int i, j;
const char *zKW;
- if( n>=2 ){
- i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127;
- for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){
- if( aKWLen[i]!=n ) continue;
- zKW = &zKWText[aKWOffset[i]];
+ assert( n>=2 );
+ i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127;
+ for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){
+ if( aKWLen[i]!=n ) continue;
+ zKW = &zKWText[aKWOffset[i]];
#ifdef SQLITE_ASCII
- if( (z[0]&~0x20)!=zKW[0] ) continue;
- if( (z[1]&~0x20)!=zKW[1] ) continue;
- j = 2;
- while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }
+ if( (z[0]&~0x20)!=zKW[0] ) continue;
+ if( (z[1]&~0x20)!=zKW[1] ) continue;
+ j = 2;
+ while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }
#endif
#ifdef SQLITE_EBCDIC
- if( toupper(z[0])!=zKW[0] ) continue;
- if( toupper(z[1])!=zKW[1] ) continue;
- j = 2;
- while( j<n && toupper(z[j])==zKW[j] ){ j++; }
-#endif
- if( j<n ) continue;
- testcase( i==1 ); /* REINDEX */
- testcase( i==2 ); /* INDEXED */
- testcase( i==3 ); /* INDEX */
- testcase( i==4 ); /* DESC */
- testcase( i==5 ); /* ESCAPE */
- testcase( i==6 ); /* EACH */
- testcase( i==7 ); /* CHECK */
- testcase( i==8 ); /* KEY */
- testcase( i==9 ); /* BEFORE */
- testcase( i==10 ); /* FOREIGN */
- testcase( i==11 ); /* FOR */
- testcase( i==12 ); /* IGNORE */
- testcase( i==13 ); /* REGEXP */
- testcase( i==14 ); /* EXPLAIN */
- testcase( i==15 ); /* INSTEAD */
- testcase( i==16 ); /* ADD */
- testcase( i==17 ); /* DATABASE */
- testcase( i==18 ); /* AS */
- testcase( i==19 ); /* SELECT */
- testcase( i==20 ); /* TABLE */
- testcase( i==21 ); /* LEFT */
- testcase( i==22 ); /* THEN */
- testcase( i==23 ); /* END */
- testcase( i==24 ); /* DEFERRABLE */
- testcase( i==25 ); /* ELSE */
- testcase( i==26 ); /* EXCLUDE */
- testcase( i==27 ); /* DELETE */
- testcase( i==28 ); /* TEMPORARY */
- testcase( i==29 ); /* TEMP */
- testcase( i==30 ); /* OR */
- testcase( i==31 ); /* ISNULL */
- testcase( i==32 ); /* NULLS */
- testcase( i==33 ); /* SAVEPOINT */
- testcase( i==34 ); /* INTERSECT */
- testcase( i==35 ); /* TIES */
- testcase( i==36 ); /* NOTNULL */
- testcase( i==37 ); /* NOT */
- testcase( i==38 ); /* NO */
- testcase( i==39 ); /* NULL */
- testcase( i==40 ); /* LIKE */
- testcase( i==41 ); /* EXCEPT */
- testcase( i==42 ); /* TRANSACTION */
- testcase( i==43 ); /* ACTION */
- testcase( i==44 ); /* ON */
- testcase( i==45 ); /* NATURAL */
- testcase( i==46 ); /* ALTER */
- testcase( i==47 ); /* RAISE */
- testcase( i==48 ); /* EXCLUSIVE */
- testcase( i==49 ); /* EXISTS */
- testcase( i==50 ); /* CONSTRAINT */
- testcase( i==51 ); /* INTO */
- testcase( i==52 ); /* OFFSET */
- testcase( i==53 ); /* OF */
- testcase( i==54 ); /* SET */
- testcase( i==55 ); /* TRIGGER */
- testcase( i==56 ); /* RANGE */
- testcase( i==57 ); /* GENERATED */
- testcase( i==58 ); /* DETACH */
- testcase( i==59 ); /* HAVING */
- testcase( i==60 ); /* GLOB */
- testcase( i==61 ); /* BEGIN */
- testcase( i==62 ); /* INNER */
- testcase( i==63 ); /* REFERENCES */
- testcase( i==64 ); /* UNIQUE */
- testcase( i==65 ); /* QUERY */
- testcase( i==66 ); /* WITHOUT */
- testcase( i==67 ); /* WITH */
- testcase( i==68 ); /* OUTER */
- testcase( i==69 ); /* RELEASE */
- testcase( i==70 ); /* ATTACH */
- testcase( i==71 ); /* BETWEEN */
- testcase( i==72 ); /* NOTHING */
- testcase( i==73 ); /* GROUPS */
- testcase( i==74 ); /* GROUP */
- testcase( i==75 ); /* CASCADE */
- testcase( i==76 ); /* ASC */
- testcase( i==77 ); /* DEFAULT */
- testcase( i==78 ); /* CASE */
- testcase( i==79 ); /* COLLATE */
- testcase( i==80 ); /* CREATE */
- testcase( i==81 ); /* CURRENT_DATE */
- testcase( i==82 ); /* IMMEDIATE */
- testcase( i==83 ); /* JOIN */
- testcase( i==84 ); /* INSERT */
- testcase( i==85 ); /* MATCH */
- testcase( i==86 ); /* PLAN */
- testcase( i==87 ); /* ANALYZE */
- testcase( i==88 ); /* PRAGMA */
- testcase( i==89 ); /* MATERIALIZED */
- testcase( i==90 ); /* DEFERRED */
- testcase( i==91 ); /* DISTINCT */
- testcase( i==92 ); /* IS */
- testcase( i==93 ); /* UPDATE */
- testcase( i==94 ); /* VALUES */
- testcase( i==95 ); /* VIRTUAL */
- testcase( i==96 ); /* ALWAYS */
- testcase( i==97 ); /* WHEN */
- testcase( i==98 ); /* WHERE */
- testcase( i==99 ); /* RECURSIVE */
- testcase( i==100 ); /* ABORT */
- testcase( i==101 ); /* AFTER */
- testcase( i==102 ); /* RENAME */
- testcase( i==103 ); /* AND */
- testcase( i==104 ); /* DROP */
- testcase( i==105 ); /* PARTITION */
- testcase( i==106 ); /* AUTOINCREMENT */
- testcase( i==107 ); /* TO */
- testcase( i==108 ); /* IN */
- testcase( i==109 ); /* CAST */
- testcase( i==110 ); /* COLUMN */
- testcase( i==111 ); /* COMMIT */
- testcase( i==112 ); /* CONFLICT */
- testcase( i==113 ); /* CROSS */
- testcase( i==114 ); /* CURRENT_TIMESTAMP */
- testcase( i==115 ); /* CURRENT_TIME */
- testcase( i==116 ); /* CURRENT */
- testcase( i==117 ); /* PRECEDING */
- testcase( i==118 ); /* FAIL */
- testcase( i==119 ); /* LAST */
- testcase( i==120 ); /* FILTER */
- testcase( i==121 ); /* REPLACE */
- testcase( i==122 ); /* FIRST */
- testcase( i==123 ); /* FOLLOWING */
- testcase( i==124 ); /* FROM */
- testcase( i==125 ); /* FULL */
- testcase( i==126 ); /* LIMIT */
- testcase( i==127 ); /* IF */
- testcase( i==128 ); /* ORDER */
- testcase( i==129 ); /* RESTRICT */
- testcase( i==130 ); /* OTHERS */
- testcase( i==131 ); /* OVER */
- testcase( i==132 ); /* RETURNING */
- testcase( i==133 ); /* RIGHT */
- testcase( i==134 ); /* ROLLBACK */
- testcase( i==135 ); /* ROWS */
- testcase( i==136 ); /* ROW */
- testcase( i==137 ); /* UNBOUNDED */
- testcase( i==138 ); /* UNION */
- testcase( i==139 ); /* USING */
- testcase( i==140 ); /* VACUUM */
- testcase( i==141 ); /* VIEW */
- testcase( i==142 ); /* WINDOW */
- testcase( i==143 ); /* DO */
- testcase( i==144 ); /* BY */
- testcase( i==145 ); /* INITIALLY */
- testcase( i==146 ); /* ALL */
- testcase( i==147 ); /* PRIMARY */
- *pType = aKWCode[i];
- break;
- }
+ if( toupper(z[0])!=zKW[0] ) continue;
+ if( toupper(z[1])!=zKW[1] ) continue;
+ j = 2;
+ while( j<n && toupper(z[j])==zKW[j] ){ j++; }
+#endif
+ if( j<n ) continue;
+ testcase( i==1 ); /* REINDEX */
+ testcase( i==2 ); /* INDEXED */
+ testcase( i==3 ); /* INDEX */
+ testcase( i==4 ); /* DESC */
+ testcase( i==5 ); /* ESCAPE */
+ testcase( i==6 ); /* EACH */
+ testcase( i==7 ); /* CHECK */
+ testcase( i==8 ); /* KEY */
+ testcase( i==9 ); /* BEFORE */
+ testcase( i==10 ); /* FOREIGN */
+ testcase( i==11 ); /* FOR */
+ testcase( i==12 ); /* IGNORE */
+ testcase( i==13 ); /* REGEXP */
+ testcase( i==14 ); /* EXPLAIN */
+ testcase( i==15 ); /* INSTEAD */
+ testcase( i==16 ); /* ADD */
+ testcase( i==17 ); /* DATABASE */
+ testcase( i==18 ); /* AS */
+ testcase( i==19 ); /* SELECT */
+ testcase( i==20 ); /* TABLE */
+ testcase( i==21 ); /* LEFT */
+ testcase( i==22 ); /* THEN */
+ testcase( i==23 ); /* END */
+ testcase( i==24 ); /* DEFERRABLE */
+ testcase( i==25 ); /* ELSE */
+ testcase( i==26 ); /* EXCLUDE */
+ testcase( i==27 ); /* DELETE */
+ testcase( i==28 ); /* TEMPORARY */
+ testcase( i==29 ); /* TEMP */
+ testcase( i==30 ); /* OR */
+ testcase( i==31 ); /* ISNULL */
+ testcase( i==32 ); /* NULLS */
+ testcase( i==33 ); /* SAVEPOINT */
+ testcase( i==34 ); /* INTERSECT */
+ testcase( i==35 ); /* TIES */
+ testcase( i==36 ); /* NOTNULL */
+ testcase( i==37 ); /* NOT */
+ testcase( i==38 ); /* NO */
+ testcase( i==39 ); /* NULL */
+ testcase( i==40 ); /* LIKE */
+ testcase( i==41 ); /* EXCEPT */
+ testcase( i==42 ); /* TRANSACTION */
+ testcase( i==43 ); /* ACTION */
+ testcase( i==44 ); /* ON */
+ testcase( i==45 ); /* NATURAL */
+ testcase( i==46 ); /* ALTER */
+ testcase( i==47 ); /* RAISE */
+ testcase( i==48 ); /* EXCLUSIVE */
+ testcase( i==49 ); /* EXISTS */
+ testcase( i==50 ); /* CONSTRAINT */
+ testcase( i==51 ); /* INTO */
+ testcase( i==52 ); /* OFFSET */
+ testcase( i==53 ); /* OF */
+ testcase( i==54 ); /* SET */
+ testcase( i==55 ); /* TRIGGER */
+ testcase( i==56 ); /* RANGE */
+ testcase( i==57 ); /* GENERATED */
+ testcase( i==58 ); /* DETACH */
+ testcase( i==59 ); /* HAVING */
+ testcase( i==60 ); /* GLOB */
+ testcase( i==61 ); /* BEGIN */
+ testcase( i==62 ); /* INNER */
+ testcase( i==63 ); /* REFERENCES */
+ testcase( i==64 ); /* UNIQUE */
+ testcase( i==65 ); /* QUERY */
+ testcase( i==66 ); /* WITHOUT */
+ testcase( i==67 ); /* WITH */
+ testcase( i==68 ); /* OUTER */
+ testcase( i==69 ); /* RELEASE */
+ testcase( i==70 ); /* ATTACH */
+ testcase( i==71 ); /* BETWEEN */
+ testcase( i==72 ); /* NOTHING */
+ testcase( i==73 ); /* GROUPS */
+ testcase( i==74 ); /* GROUP */
+ testcase( i==75 ); /* CASCADE */
+ testcase( i==76 ); /* ASC */
+ testcase( i==77 ); /* DEFAULT */
+ testcase( i==78 ); /* CASE */
+ testcase( i==79 ); /* COLLATE */
+ testcase( i==80 ); /* CREATE */
+ testcase( i==81 ); /* CURRENT_DATE */
+ testcase( i==82 ); /* IMMEDIATE */
+ testcase( i==83 ); /* JOIN */
+ testcase( i==84 ); /* INSERT */
+ testcase( i==85 ); /* MATCH */
+ testcase( i==86 ); /* PLAN */
+ testcase( i==87 ); /* ANALYZE */
+ testcase( i==88 ); /* PRAGMA */
+ testcase( i==89 ); /* MATERIALIZED */
+ testcase( i==90 ); /* DEFERRED */
+ testcase( i==91 ); /* DISTINCT */
+ testcase( i==92 ); /* IS */
+ testcase( i==93 ); /* UPDATE */
+ testcase( i==94 ); /* VALUES */
+ testcase( i==95 ); /* VIRTUAL */
+ testcase( i==96 ); /* ALWAYS */
+ testcase( i==97 ); /* WHEN */
+ testcase( i==98 ); /* WHERE */
+ testcase( i==99 ); /* RECURSIVE */
+ testcase( i==100 ); /* ABORT */
+ testcase( i==101 ); /* AFTER */
+ testcase( i==102 ); /* RENAME */
+ testcase( i==103 ); /* AND */
+ testcase( i==104 ); /* DROP */
+ testcase( i==105 ); /* PARTITION */
+ testcase( i==106 ); /* AUTOINCREMENT */
+ testcase( i==107 ); /* TO */
+ testcase( i==108 ); /* IN */
+ testcase( i==109 ); /* CAST */
+ testcase( i==110 ); /* COLUMN */
+ testcase( i==111 ); /* COMMIT */
+ testcase( i==112 ); /* CONFLICT */
+ testcase( i==113 ); /* CROSS */
+ testcase( i==114 ); /* CURRENT_TIMESTAMP */
+ testcase( i==115 ); /* CURRENT_TIME */
+ testcase( i==116 ); /* CURRENT */
+ testcase( i==117 ); /* PRECEDING */
+ testcase( i==118 ); /* FAIL */
+ testcase( i==119 ); /* LAST */
+ testcase( i==120 ); /* FILTER */
+ testcase( i==121 ); /* REPLACE */
+ testcase( i==122 ); /* FIRST */
+ testcase( i==123 ); /* FOLLOWING */
+ testcase( i==124 ); /* FROM */
+ testcase( i==125 ); /* FULL */
+ testcase( i==126 ); /* LIMIT */
+ testcase( i==127 ); /* IF */
+ testcase( i==128 ); /* ORDER */
+ testcase( i==129 ); /* RESTRICT */
+ testcase( i==130 ); /* OTHERS */
+ testcase( i==131 ); /* OVER */
+ testcase( i==132 ); /* RETURNING */
+ testcase( i==133 ); /* RIGHT */
+ testcase( i==134 ); /* ROLLBACK */
+ testcase( i==135 ); /* ROWS */
+ testcase( i==136 ); /* ROW */
+ testcase( i==137 ); /* UNBOUNDED */
+ testcase( i==138 ); /* UNION */
+ testcase( i==139 ); /* USING */
+ testcase( i==140 ); /* VACUUM */
+ testcase( i==141 ); /* VIEW */
+ testcase( i==142 ); /* WINDOW */
+ testcase( i==143 ); /* DO */
+ testcase( i==144 ); /* BY */
+ testcase( i==145 ); /* INITIALLY */
+ testcase( i==146 ); /* ALL */
+ testcase( i==147 ); /* PRIMARY */
+ *pType = aKWCode[i];
+ break;
}
return n;
}
SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
int id = TK_ID;
- keywordCode((char*)z, n, &id);
+ if( n>=2 ) keywordCode((char*)z, n, &id);
return id;
}
#define SQLITE_N_KEYWORD 147
@@ -174080,7 +177290,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' );
testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' );
testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' );
- testcase( z[0]=='9' );
+ testcase( z[0]=='9' ); testcase( z[0]=='.' );
*tokenType = TK_INTEGER;
#ifndef SQLITE_OMIT_HEX_INTEGER
if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){
@@ -174152,7 +177362,8 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
return i;
}
case CC_KYWD0: {
- for(i=1; aiClass[z[i]]<=CC_KYWD; i++){}
+ if( aiClass[z[1]]>CC_KYWD ){ i = 1; break; }
+ for(i=2; aiClass[z[i]]<=CC_KYWD; i++){}
if( IdChar(z[i]) ){
/* This token started out using characters that can appear in keywords,
** but z[i] is a character not allowed within keywords, so this must
@@ -174931,30 +178142,20 @@ static int sqlite3TestExtInit(sqlite3 *db){
** Forward declarations of external module initializer functions
** for modules that need them.
*/
-#ifdef SQLITE_ENABLE_FTS1
-SQLITE_PRIVATE int sqlite3Fts1Init(sqlite3*);
-#endif
-#ifdef SQLITE_ENABLE_FTS2
-SQLITE_PRIVATE int sqlite3Fts2Init(sqlite3*);
-#endif
#ifdef SQLITE_ENABLE_FTS5
SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*);
#endif
#ifdef SQLITE_ENABLE_STMTVTAB
SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3*);
#endif
-
+#ifdef SQLITE_EXTRA_AUTOEXT
+int SQLITE_EXTRA_AUTOEXT(sqlite3*);
+#endif
/*
** An array of pointers to extension initializer functions for
** built-in extensions.
*/
static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = {
-#ifdef SQLITE_ENABLE_FTS1
- sqlite3Fts1Init,
-#endif
-#ifdef SQLITE_ENABLE_FTS2
- sqlite3Fts2Init,
-#endif
#ifdef SQLITE_ENABLE_FTS3
sqlite3Fts3Init,
#endif
@@ -174983,6 +178184,9 @@ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = {
#ifdef SQLITE_ENABLE_BYTECODE_VTAB
sqlite3VdbeBytecodeVtabInit,
#endif
+#ifdef SQLITE_EXTRA_AUTOEXT
+ SQLITE_EXTRA_AUTOEXT,
+#endif
};
#ifndef SQLITE_AMALGAMATION
@@ -175057,6 +178261,32 @@ SQLITE_API char *sqlite3_temp_directory = 0;
SQLITE_API char *sqlite3_data_directory = 0;
/*
+** Determine whether or not high-precision (long double) floating point
+** math works correctly on CPU currently running.
+*/
+static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){
+ if( sizeof(LONGDOUBLE_TYPE)<=8 ){
+ /* If the size of "long double" is not more than 8, then
+ ** high-precision math is not possible. */
+ return 0;
+ }else{
+ /* Just because sizeof(long double)>8 does not mean that the underlying
+ ** hardware actually supports high-precision floating point. For example,
+ ** clearing the 0x100 bit in the floating-point control word on Intel
+ ** processors will make long double work like double, even though long
+ ** double takes up more space. The only way to determine if long double
+ ** actually works is to run an experiment. */
+ LONGDOUBLE_TYPE a, b, c;
+ rc++;
+ a = 1.0+rc*0.1;
+ b = 1.0e+18+rc*25.0;
+ c = a+b;
+ return b!=c;
+ }
+}
+
+
+/*
** Initialize SQLite.
**
** This routine must be called to initialize the memory allocation,
@@ -175251,6 +178481,12 @@ SQLITE_API int sqlite3_initialize(void){
}
#endif
+ /* Experimentally determine if high-precision floating point is
+ ** available. */
+#ifndef SQLITE_OMIT_WSD
+ sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc);
+#endif
+
return rc;
}
@@ -175821,6 +179057,10 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){
SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
va_list ap;
int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(db->mutex);
va_start(ap, op);
switch( op ){
@@ -176150,6 +179390,14 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){
}
#endif
+ while( db->pDbData ){
+ DbClientData *p = db->pDbData;
+ db->pDbData = p->pNext;
+ assert( p->pData!=0 );
+ if( p->xDestructor ) p->xDestructor(p->pData);
+ sqlite3_free(p);
+ }
+
/* Convert the connection into a zombie and then close it.
*/
db->eOpenState = SQLITE_STATE_ZOMBIE;
@@ -176567,9 +179815,9 @@ static int sqliteDefaultBusyCallback(
void *ptr, /* Database connection */
int count /* Number of times table has been busy */
){
-#if SQLITE_OS_WIN || HAVE_USLEEP
+#if SQLITE_OS_WIN || !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP
/* This case is for systems that have support for sleeping for fractions of
- ** a second. Examples: All windows systems, unix systems with usleep() */
+ ** a second. Examples: All windows systems, unix systems with nanosleep() */
static const u8 delays[] =
{ 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 };
static const u8 totals[] =
@@ -176767,7 +180015,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC );
assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY );
extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY|
- SQLITE_SUBTYPE|SQLITE_INNOCUOUS);
+ SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE);
enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY);
/* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But
@@ -177224,6 +180472,12 @@ SQLITE_API void *sqlite3_preupdate_hook(
void *pArg /* First callback argument */
){
void *pRet;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( db==0 ){
+ return 0;
+ }
+#endif
sqlite3_mutex_enter(db->mutex);
pRet = db->pPreUpdateArg;
db->xPreUpdateCallback = xCallback;
@@ -177370,7 +180624,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2(
if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){
/* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint
** mode: */
- return SQLITE_MISUSE;
+ return SQLITE_MISUSE_BKPT;
}
sqlite3_mutex_enter(db->mutex);
@@ -178207,7 +181461,7 @@ static int openDatabase(
** 0 off off
**
** Legacy behavior is 3 (double-quoted string literals are allowed anywhere)
-** and so that is the default. But developers are encouranged to use
+** and so that is the default. But developers are encouraged to use
** -DSQLITE_DQS=0 (best) or -DSQLITE_DQS=1 (second choice) if possible.
*/
#if !defined(SQLITE_DQS)
@@ -178607,6 +181861,69 @@ SQLITE_API int sqlite3_collation_needed16(
}
#endif /* SQLITE_OMIT_UTF16 */
+/*
+** Find existing client data.
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3 *db, const char *zName){
+ DbClientData *p;
+ sqlite3_mutex_enter(db->mutex);
+ for(p=db->pDbData; p; p=p->pNext){
+ if( strcmp(p->zName, zName)==0 ){
+ void *pResult = p->pData;
+ sqlite3_mutex_leave(db->mutex);
+ return pResult;
+ }
+ }
+ sqlite3_mutex_leave(db->mutex);
+ return 0;
+}
+
+/*
+** Add new client data to a database connection.
+*/
+SQLITE_API int sqlite3_set_clientdata(
+ sqlite3 *db, /* Attach client data to this connection */
+ const char *zName, /* Name of the client data */
+ void *pData, /* The client data itself */
+ void (*xDestructor)(void*) /* Destructor */
+){
+ DbClientData *p, **pp;
+ sqlite3_mutex_enter(db->mutex);
+ pp = &db->pDbData;
+ for(p=db->pDbData; p && strcmp(p->zName,zName); p=p->pNext){
+ pp = &p->pNext;
+ }
+ if( p ){
+ assert( p->pData!=0 );
+ if( p->xDestructor ) p->xDestructor(p->pData);
+ if( pData==0 ){
+ *pp = p->pNext;
+ sqlite3_free(p);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+ }
+ }else if( pData==0 ){
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+ }else{
+ size_t n = strlen(zName);
+ p = sqlite3_malloc64( sizeof(DbClientData)+n+1 );
+ if( p==0 ){
+ if( xDestructor ) xDestructor(pData);
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_NOMEM;
+ }
+ memcpy(p->zName, zName, n+1);
+ p->pNext = db->pDbData;
+ db->pDbData = p;
+ }
+ p->pData = pData;
+ p->xDestructor = xDestructor;
+ sqlite3_mutex_leave(db->mutex);
+ return SQLITE_OK;
+}
+
+
#ifndef SQLITE_OMIT_DEPRECATED
/*
** This function is now an anachronism. It used to be used to recover from a
@@ -178742,7 +182059,7 @@ SQLITE_API int sqlite3_table_column_metadata(
/* Find the column for which info is requested */
if( zColumnName==0 ){
- /* Query for existance of table only */
+ /* Query for existence of table only */
}else{
for(iCol=0; iCol<pTab->nCol; iCol++){
pCol = &pTab->aCol[iCol];
@@ -178956,6 +182273,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){
}
#endif
+ /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b);
+ **
+ ** If b is true, then activate the SQLITE_FkNoAction setting. If b is
+ ** false then clearn that setting. If the SQLITE_FkNoAction setting is
+ ** abled, all foreign key ON DELETE and ON UPDATE actions behave as if
+ ** they were NO ACTION, regardless of how they are defined.
+ **
+ ** NB: One must usually run "PRAGMA writable_schema=RESET" after
+ ** using this test-control, before it will take full effect. failing
+ ** to reset the schema can result in some unexpected behavior.
+ */
+ case SQLITE_TESTCTRL_FK_NO_ACTION: {
+ sqlite3 *db = va_arg(ap, sqlite3*);
+ int b = va_arg(ap, int);
+ if( b ){
+ db->flags |= SQLITE_FkNoAction;
+ }else{
+ db->flags &= ~SQLITE_FkNoAction;
+ }
+ break;
+ }
+
/*
** sqlite3_test_control(BITVEC_TEST, size, program)
**
@@ -179062,10 +182401,12 @@ SQLITE_API int sqlite3_test_control(int op, ...){
sqlite3ShowSrcList(0);
sqlite3ShowWith(0);
sqlite3ShowUpsert(0);
+#ifndef SQLITE_OMIT_TRIGGER
sqlite3ShowTriggerStep(0);
sqlite3ShowTriggerStepList(0);
sqlite3ShowTrigger(0);
sqlite3ShowTriggerList(0);
+#endif
#ifndef SQLITE_OMIT_WINDOWFUNC
sqlite3ShowWindow(0);
sqlite3ShowWinFunc(0);
@@ -179182,7 +182523,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){
** formed and never corrupt. This flag is clear by default, indicating that
** database files might have arbitrary corruption. Setting the flag during
** testing causes certain assert() statements in the code to be activated
- ** that demonstrat invariants on well-formed database files.
+ ** that demonstrate invariants on well-formed database files.
*/
case SQLITE_TESTCTRL_NEVER_CORRUPT: {
sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int);
@@ -179336,7 +182677,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){
**
** op==0 Store the current sqlite3TreeTrace in *ptr
** op==1 Set sqlite3TreeTrace to the value *ptr
- ** op==3 Store the current sqlite3WhereTrace in *ptr
+ ** op==2 Store the current sqlite3WhereTrace in *ptr
** op==3 Set sqlite3WhereTrace to the value *ptr
*/
case SQLITE_TESTCTRL_TRACEFLAGS: {
@@ -179372,6 +182713,23 @@ SQLITE_API int sqlite3_test_control(int op, ...){
break;
}
+#if !defined(SQLITE_OMIT_WSD)
+ /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X);
+ **
+ ** X<0 Make no changes to the bUseLongDouble. Just report value.
+ ** X==0 Disable bUseLongDouble
+ ** X==1 Enable bUseLongDouble
+ ** X>=2 Set bUseLongDouble to its default value for this platform
+ */
+ case SQLITE_TESTCTRL_USELONGDOUBLE: {
+ int b = va_arg(ap, int);
+ if( b>=2 ) b = hasHighPrecisionDouble(b);
+ if( b>=0 ) sqlite3Config.bUseLongDouble = b>0;
+ rc = sqlite3Config.bUseLongDouble!=0;
+ break;
+ }
+#endif
+
#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD)
/* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue)
@@ -179402,6 +182760,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){
break;
}
#endif
+
+ /* sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
+ **
+ ** Activate or deactivate validation of JSONB that is generated from
+ ** text. Off by default, as the validation is slow. Validation is
+ ** only available if compiled using SQLITE_DEBUG.
+ **
+ ** If onOff is initially 1, then turn it on. If onOff is initially
+ ** off, turn it off. If onOff is initially -1, then change onOff
+ ** to be the current setting.
+ */
+ case SQLITE_TESTCTRL_JSON_SELFCHECK: {
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD)
+ int *pOnOff = va_arg(ap, int*);
+ if( *pOnOff<0 ){
+ *pOnOff = sqlite3Config.bJsonSelfcheck;
+ }else{
+ sqlite3Config.bJsonSelfcheck = (u8)((*pOnOff)&0xff);
+ }
+#endif
+ break;
+ }
}
va_end(ap);
#endif /* SQLITE_UNTESTABLE */
@@ -179672,7 +183052,7 @@ SQLITE_API int sqlite3_snapshot_get(
}
/*
-** Open a read-transaction on the snapshot idendified by pSnapshot.
+** Open a read-transaction on the snapshot identified by pSnapshot.
*/
SQLITE_API int sqlite3_snapshot_open(
sqlite3 *db,
@@ -179779,7 +183159,7 @@ SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
int nOpt;
const char **azCompileOpt;
-#if SQLITE_ENABLE_API_ARMOR
+#ifdef SQLITE_ENABLE_API_ARMOR
if( zOptName==0 ){
(void)SQLITE_MISUSE_BKPT;
return 0;
@@ -179974,6 +183354,9 @@ SQLITE_API int sqlite3_unlock_notify(
){
int rc = SQLITE_OK;
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
sqlite3_mutex_enter(db->mutex);
enterMutex();
@@ -180995,6 +184378,7 @@ struct Fts3Table {
int nPgsz; /* Page size for host database */
char *zSegmentsTbl; /* Name of %_segments table */
sqlite3_blob *pSegments; /* Blob handle open on %_segments table */
+ int iSavepoint;
/*
** The following array of hash tables is used to buffer pending index
@@ -181382,6 +184766,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int);
SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*);
+SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk);
+
#endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */
#endif /* _FTSINT_H */
@@ -181738,6 +185124,7 @@ static void fts3DeclareVtab(int *pRc, Fts3Table *p){
zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid");
sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+ sqlite3_vtab_config(p->db, SQLITE_VTAB_INNOCUOUS);
/* Create a list of user columns for the virtual table */
zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]);
@@ -184987,6 +188374,8 @@ static int fts3RenameMethod(
rc = sqlite3Fts3PendingTermsFlush(p);
}
+ p->bIgnoreSavepoint = 1;
+
if( p->zContentTbl==0 ){
fts3DbExec(&rc, db,
"ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';",
@@ -185014,6 +188403,8 @@ static int fts3RenameMethod(
"ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';",
p->zDb, p->zName, zName
);
+
+ p->bIgnoreSavepoint = 0;
return rc;
}
@@ -185024,12 +188415,28 @@ static int fts3RenameMethod(
*/
static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
int rc = SQLITE_OK;
- UNUSED_PARAMETER(iSavepoint);
- assert( ((Fts3Table *)pVtab)->inTransaction );
- assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint );
- TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint );
- if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){
- rc = fts3SyncMethod(pVtab);
+ Fts3Table *pTab = (Fts3Table*)pVtab;
+ assert( pTab->inTransaction );
+ assert( pTab->mxSavepoint<=iSavepoint );
+ TESTONLY( pTab->mxSavepoint = iSavepoint );
+
+ if( pTab->bIgnoreSavepoint==0 ){
+ if( fts3HashCount(&pTab->aIndex[0].hPending)>0 ){
+ char *zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')",
+ pTab->zDb, pTab->zName, pTab->zName
+ );
+ if( zSql ){
+ pTab->bIgnoreSavepoint = 1;
+ rc = sqlite3_exec(pTab->db, zSql, 0, 0, 0);
+ pTab->bIgnoreSavepoint = 0;
+ sqlite3_free(zSql);
+ }else{
+ rc = SQLITE_NOMEM;
+ }
+ }
+ if( rc==SQLITE_OK ){
+ pTab->iSavepoint = iSavepoint+1;
+ }
}
return rc;
}
@@ -185040,12 +188447,11 @@ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
** This is a no-op.
*/
static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
- TESTONLY( Fts3Table *p = (Fts3Table*)pVtab );
- UNUSED_PARAMETER(iSavepoint);
- UNUSED_PARAMETER(pVtab);
- assert( p->inTransaction );
- assert( p->mxSavepoint >= iSavepoint );
- TESTONLY( p->mxSavepoint = iSavepoint-1 );
+ Fts3Table *pTab = (Fts3Table*)pVtab;
+ assert( pTab->inTransaction );
+ assert( pTab->mxSavepoint >= iSavepoint );
+ TESTONLY( pTab->mxSavepoint = iSavepoint-1 );
+ pTab->iSavepoint = iSavepoint;
return SQLITE_OK;
}
@@ -185055,11 +188461,13 @@ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
** Discard the contents of the pending terms table.
*/
static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
- Fts3Table *p = (Fts3Table*)pVtab;
+ Fts3Table *pTab = (Fts3Table*)pVtab;
UNUSED_PARAMETER(iSavepoint);
- assert( p->inTransaction );
- TESTONLY( p->mxSavepoint = iSavepoint );
- sqlite3Fts3PendingTermsClear(p);
+ assert( pTab->inTransaction );
+ TESTONLY( pTab->mxSavepoint = iSavepoint );
+ if( (iSavepoint+1)<=pTab->iSavepoint ){
+ sqlite3Fts3PendingTermsClear(pTab);
+ }
return SQLITE_OK;
}
@@ -185078,8 +188486,40 @@ static int fts3ShadowName(const char *zName){
return 0;
}
+/*
+** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual
+** table.
+*/
+static int fts3IntegrityMethod(
+ sqlite3_vtab *pVtab, /* The virtual table to be checked */
+ const char *zSchema, /* Name of schema in which pVtab lives */
+ const char *zTabname, /* Name of the pVTab table */
+ int isQuick, /* True if this is a quick_check */
+ char **pzErr /* Write error message here */
+){
+ Fts3Table *p = (Fts3Table*)pVtab;
+ int rc;
+ int bOk = 0;
+
+ UNUSED_PARAMETER(isQuick);
+ rc = sqlite3Fts3IntegrityCheck(p, &bOk);
+ assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 );
+ if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){
+ *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
+ " FTS%d table %s.%s: %s",
+ p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc));
+ }else if( bOk==0 ){
+ *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s",
+ p->bFts4 ? 4 : 3, zSchema, zTabname);
+ }
+ sqlite3Fts3SegmentsClose(p);
+ return SQLITE_OK;
+}
+
+
+
static const sqlite3_module fts3Module = {
- /* iVersion */ 3,
+ /* iVersion */ 4,
/* xCreate */ fts3CreateMethod,
/* xConnect */ fts3ConnectMethod,
/* xBestIndex */ fts3BestIndexMethod,
@@ -185103,6 +188543,7 @@ static const sqlite3_module fts3Module = {
/* xRelease */ fts3ReleaseMethod,
/* xRollbackTo */ fts3RollbackToMethod,
/* xShadowName */ fts3ShadowName,
+ /* xIntegrity */ fts3IntegrityMethod,
};
/*
@@ -187778,7 +191219,8 @@ SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
int rc; /* Return code */
@@ -191344,7 +194786,8 @@ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash, void(*xDestr
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
int rc; /* Return code */
@@ -194685,7 +198128,6 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING);
if( rc==SQLITE_DONE ) rc = SQLITE_OK;
}
- sqlite3Fts3PendingTermsClear(p);
/* Determine the auto-incr-merge setting if unknown. If enabled,
** estimate the number of leaf blocks of content to be written
@@ -194707,6 +198149,10 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
rc = sqlite3_reset(pStmt);
}
}
+
+ if( rc==SQLITE_OK ){
+ sqlite3Fts3PendingTermsClear(p);
+ }
return rc;
}
@@ -195338,6 +198784,8 @@ static int fts3AppendToNode(
blobGrowBuffer(pPrev, nTerm, &rc);
if( rc!=SQLITE_OK ) return rc;
+ assert( pPrev!=0 );
+ assert( pPrev->a!=0 );
nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm);
nSuffix = nTerm - nPrefix;
@@ -195394,9 +198842,13 @@ static int fts3IncrmergeAppend(
nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist;
/* If the current block is not empty, and if adding this term/doclist
- ** to the current block would make it larger than Fts3Table.nNodeSize
- ** bytes, write this block out to the database. */
- if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){
+ ** to the current block would make it larger than Fts3Table.nNodeSize bytes,
+ ** and if there is still room for another leaf page, write this block out to
+ ** the database. */
+ if( pLeaf->block.n>0
+ && (pLeaf->block.n + nSpace)>p->nNodeSize
+ && pLeaf->iBlock < (pWriter->iStart + pWriter->nLeafEst)
+ ){
rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n);
pWriter->nWork++;
@@ -195707,6 +199159,7 @@ static int fts3IncrmergeLoad(
for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){
NodeReader reader;
+ memset(&reader, 0, sizeof(reader));
pNode = &pWriter->aNodeWriter[i];
if( pNode->block.a){
@@ -195727,7 +199180,7 @@ static int fts3IncrmergeLoad(
rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0);
blobGrowBuffer(&pNode->block,
MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc
- );
+ );
if( rc==SQLITE_OK ){
memcpy(pNode->block.a, aBlock, nBlock);
pNode->block.n = nBlock;
@@ -196577,7 +200030,7 @@ static u64 fts3ChecksumIndex(
int rc;
u64 cksum = 0;
- assert( *pRc==SQLITE_OK );
+ if( *pRc ) return 0;
memset(&filter, 0, sizeof(filter));
memset(&csr, 0, sizeof(csr));
@@ -196644,7 +200097,7 @@ static u64 fts3ChecksumIndex(
** If an error occurs (e.g. an OOM or IO error), return an SQLite error
** code. The final value of *pbOk is undefined in this case.
*/
-static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
+SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){
int rc = SQLITE_OK; /* Return code */
u64 cksum1 = 0; /* Checksum based on FTS index contents */
u64 cksum2 = 0; /* Checksum based on %_content contents */
@@ -196722,7 +200175,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
sqlite3_finalize(pStmt);
}
- *pbOk = (cksum1==cksum2);
+ *pbOk = (rc==SQLITE_OK && cksum1==cksum2);
return rc;
}
@@ -196762,7 +200215,7 @@ static int fts3DoIntegrityCheck(
){
int rc;
int bOk = 0;
- rc = fts3IntegrityCheck(p, &bOk);
+ rc = sqlite3Fts3IntegrityCheck(p, &bOk);
if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB;
return rc;
}
@@ -196792,8 +200245,11 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){
rc = fts3DoIncrmerge(p, &zVal[6]);
}else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){
rc = fts3DoAutoincrmerge(p, &zVal[10]);
+ }else if( nVal==5 && 0==sqlite3_strnicmp(zVal, "flush", 5) ){
+ rc = sqlite3Fts3PendingTermsFlush(p);
+ }
#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- }else{
+ else{
int v;
if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){
v = atoi(&zVal[9]);
@@ -196811,8 +200267,8 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){
if( v>=4 && v<=FTS3_MERGE_COUNT && (v&1)==0 ) p->nMergeCount = v;
rc = SQLITE_OK;
}
-#endif
}
+#endif
return rc;
}
@@ -199733,59 +203189,242 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){
**
******************************************************************************
**
-** This SQLite JSON functions.
+** SQLite JSON functions.
**
** This file began as an extension in ext/misc/json1.c in 2015. That
** extension proved so useful that it has now been moved into the core.
**
-** For the time being, all JSON is stored as pure text. (We might add
-** a JSONB type in the future which stores a binary encoding of JSON in
-** a BLOB, but there is no support for JSONB in the current implementation.
-** This implementation parses JSON text at 250 MB/s, so it is hard to see
-** how JSONB might improve on that.)
+** The original design stored all JSON as pure text, canonical RFC-8259.
+** Support for JSON-5 extensions was added with version 3.42.0 (2023-05-16).
+** All generated JSON text still conforms strictly to RFC-8259, but text
+** with JSON-5 extensions is accepted as input.
+**
+** Beginning with version 3.45.0 (circa 2024-01-01), these routines also
+** accept BLOB values that have JSON encoded using a binary representation
+** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk
+** format SQLite JSONB is completely different and incompatible with
+** PostgreSQL JSONB.
+**
+** Decoding and interpreting JSONB is still O(N) where N is the size of
+** the input, the same as text JSON. However, the constant of proportionality
+** for JSONB is much smaller due to faster parsing. The size of each
+** element in JSONB is encoded in its header, so there is no need to search
+** for delimiters using persnickety syntax rules. JSONB seems to be about
+** 3x faster than text JSON as a result. JSONB is also tends to be slightly
+** smaller than text JSON, by 5% or 10%, but there are corner cases where
+** JSONB can be slightly larger. So you are not far mistaken to say that
+** a JSONB blob is the same size as the equivalent RFC-8259 text.
+**
+**
+** THE JSONB ENCODING:
+**
+** Every JSON element is encoded in JSONB as a header and a payload.
+** The header is between 1 and 9 bytes in size. The payload is zero
+** or more bytes.
+**
+** The lower 4 bits of the first byte of the header determines the
+** element type:
+**
+** 0: NULL
+** 1: TRUE
+** 2: FALSE
+** 3: INT -- RFC-8259 integer literal
+** 4: INT5 -- JSON5 integer literal
+** 5: FLOAT -- RFC-8259 floating point literal
+** 6: FLOAT5 -- JSON5 floating point literal
+** 7: TEXT -- Text literal acceptable to both SQL and JSON
+** 8: TEXTJ -- Text containing RFC-8259 escapes
+** 9: TEXT5 -- Text containing JSON5 and/or RFC-8259 escapes
+** 10: TEXTRAW -- Text containing unescaped syntax characters
+** 11: ARRAY
+** 12: OBJECT
+**
+** The other three possible values (13-15) are reserved for future
+** enhancements.
+**
+** The upper 4 bits of the first byte determine the size of the header
+** and sometimes also the size of the payload. If X is the first byte
+** of the element and if X>>4 is between 0 and 11, then the payload
+** will be that many bytes in size and the header is exactly one byte
+** in size. Other four values for X>>4 (12-15) indicate that the header
+** is more than one byte in size and that the payload size is determined
+** by the remainder of the header, interpreted as a unsigned big-endian
+** integer.
+**
+** Value of X>>4 Size integer Total header size
+** ------------- -------------------- -----------------
+** 12 1 byte (0-255) 2
+** 13 2 byte (0-65535) 3
+** 14 4 byte (0-4294967295) 5
+** 15 8 byte (0-1.8e19) 9
+**
+** The payload size need not be expressed in its minimal form. For example,
+** if the payload size is 10, the size can be expressed in any of 5 different
+** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by on 0x0a byte,
+** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by
+** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and
+** a single byte of 0x0a. The shorter forms are preferred, of course, but
+** sometimes when generating JSONB, the payload size is not known in advance
+** and it is convenient to reserve sufficient header space to cover the
+** largest possible payload size and then come back later and patch up
+** the size when it becomes known, resulting in a non-minimal encoding.
+**
+** The value (X>>4)==15 is not actually used in the current implementation
+** (as SQLite is currently unable handle BLOBs larger than about 2GB)
+** but is included in the design to allow for future enhancements.
+**
+** The payload follows the header. NULL, TRUE, and FALSE have no payload and
+** their payload size must always be zero. The payload for INT, INT5,
+** FLOAT, FLOAT5, TEXT, TEXTJ, TEXT5, and TEXTROW is text. Note that the
+** "..." or '...' delimiters are omitted from the various text encodings.
+** The payload for ARRAY and OBJECT is a list of additional elements that
+** are the content for the array or object. The payload for an OBJECT
+** must be an even number of elements. The first element of each pair is
+** the label and must be of type TEXT, TEXTJ, TEXT5, or TEXTRAW.
+**
+** A valid JSONB blob consists of a single element, as described above.
+** Usually this will be an ARRAY or OBJECT element which has many more
+** elements as its content. But the overall blob is just a single element.
+**
+** Input validation for JSONB blobs simply checks that the element type
+** code is between 0 and 12 and that the total size of the element
+** (header plus payload) is the same as the size of the BLOB. If those
+** checks are true, the BLOB is assumed to be JSONB and processing continues.
+** Errors are only raised if some other miscoding is discovered during
+** processing.
+**
+** Additional information can be found in the doc/jsonb.md file of the
+** canonical SQLite source tree.
*/
#ifndef SQLITE_OMIT_JSON
/* #include "sqliteInt.h" */
+/* JSONB element types
+*/
+#define JSONB_NULL 0 /* "null" */
+#define JSONB_TRUE 1 /* "true" */
+#define JSONB_FALSE 2 /* "false" */
+#define JSONB_INT 3 /* integer acceptable to JSON and SQL */
+#define JSONB_INT5 4 /* integer in 0x000 notation */
+#define JSONB_FLOAT 5 /* float acceptable to JSON and SQL */
+#define JSONB_FLOAT5 6 /* float with JSON5 extensions */
+#define JSONB_TEXT 7 /* Text compatible with both JSON and SQL */
+#define JSONB_TEXTJ 8 /* Text with JSON escapes */
+#define JSONB_TEXT5 9 /* Text with JSON-5 escape */
+#define JSONB_TEXTRAW 10 /* SQL text that needs escaping for JSON */
+#define JSONB_ARRAY 11 /* An array */
+#define JSONB_OBJECT 12 /* An object */
+
+/* Human-readable names for the JSONB values. The index for each
+** string must correspond to the JSONB_* integer above.
+*/
+static const char * const jsonbType[] = {
+ "null", "true", "false", "integer", "integer",
+ "real", "real", "text", "text", "text",
+ "text", "array", "object", "", "", "", ""
+};
+
/*
** Growing our own isspace() routine this way is twice as fast as
** the library isspace() function, resulting in a 7% overall performance
-** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
+** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
*/
static const char jsonIsSpace[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+#define jsonIsspace(x) (jsonIsSpace[(unsigned char)x])
+
+/*
+** The set of all space characters recognized by jsonIsspace().
+** Useful as the second argument to strspn().
+*/
+static const char jsonSpaces[] = "\011\012\015\040";
+
+/*
+** Characters that are special to JSON. Control characters,
+** '"' and '\\' and '\''. Actually, '\'' is not special to
+** canonical JSON, but it is special in JSON-5, so we include
+** it in the set of special characters.
+*/
+static const char jsonIsOk[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
-#define fast_isspace(x) (jsonIsSpace[(unsigned char)x])
-
-#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST)
-# define VVA(X)
-#else
-# define VVA(X) X
-#endif
/* Objects */
+typedef struct JsonCache JsonCache;
typedef struct JsonString JsonString;
-typedef struct JsonNode JsonNode;
typedef struct JsonParse JsonParse;
+/*
+** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+*/
+#define JSON_CACHE_ID (-429938) /* Cache entry */
+#define JSON_CACHE_SIZE 4 /* Max number of cache entries */
+
+/*
+** jsonUnescapeOneChar() returns this invalid code point if it encounters
+** a syntax error.
+*/
+#define JSON_INVALID_CHAR 0x99999
+
+/* A cache mapping JSON text into JSONB blobs.
+**
+** Each cache entry is a JsonParse object with the following restrictions:
+**
+** * The bReadOnly flag must be set
+**
+** * The aBlob[] array must be owned by the JsonParse object. In other
+** words, nBlobAlloc must be non-zero.
+**
+** * eEdit and delta must be zero.
+**
+** * zJson must be an RCStr. In other words bJsonIsRCStr must be true.
+*/
+struct JsonCache {
+ sqlite3 *db; /* Database connection */
+ int nUsed; /* Number of active entries in the cache */
+ JsonParse *a[JSON_CACHE_SIZE]; /* One line for each cache entry */
+};
+
/* An instance of this object represents a JSON string
** under construction. Really, this is a generic string accumulator
** that can be and is used to create strings other than JSON.
+**
+** If the generated string is longer than will fit into the zSpace[] buffer,
+** then it will be an RCStr string. This aids with caching of large
+** JSON strings.
*/
struct JsonString {
sqlite3_context *pCtx; /* Function context - put error messages here */
@@ -199793,76 +203432,75 @@ struct JsonString {
u64 nAlloc; /* Bytes of storage available in zBuf[] */
u64 nUsed; /* Bytes of zBuf[] currently used */
u8 bStatic; /* True if zBuf is static space */
- u8 bErr; /* True if an error has been encountered */
+ u8 eErr; /* True if an error has been encountered */
char zSpace[100]; /* Initial static space */
};
-/* JSON type values
-*/
-#define JSON_NULL 0
-#define JSON_TRUE 1
-#define JSON_FALSE 2
-#define JSON_INT 3
-#define JSON_REAL 4
-#define JSON_STRING 5
-#define JSON_ARRAY 6
-#define JSON_OBJECT 7
+/* Allowed values for JsonString.eErr */
+#define JSTRING_OOM 0x01 /* Out of memory */
+#define JSTRING_MALFORMED 0x02 /* Malformed JSONB */
+#define JSTRING_ERR 0x04 /* Error already sent to sqlite3_result */
-/* The "subtype" set for JSON values */
+/* The "subtype" set for text JSON values passed through using
+** sqlite3_result_subtype() and sqlite3_value_subtype().
+*/
#define JSON_SUBTYPE 74 /* Ascii for "J" */
/*
-** Names of the various JSON types:
+** Bit values for the flags passed into various SQL function implementations
+** via the sqlite3_user_data() value.
*/
-static const char * const jsonType[] = {
- "null", "true", "false", "integer", "real", "text", "array", "object"
-};
-
-/* Bit values for the JsonNode.jnFlag field
-*/
-#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
-#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
-#define JNODE_REMOVE 0x04 /* Do not output */
-#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */
-#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */
-#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */
-#define JNODE_LABEL 0x40 /* Is a label of an object */
-#define JNODE_JSON5 0x80 /* Node contains JSON5 enhancements */
-
+#define JSON_JSON 0x01 /* Result is always JSON */
+#define JSON_SQL 0x02 /* Result is always SQL */
+#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */
+#define JSON_ISSET 0x04 /* json_set(), not json_insert() */
+#define JSON_BLOB 0x08 /* Use the BLOB output format */
-/* A single node of parsed JSON
-*/
-struct JsonNode {
- u8 eType; /* One of the JSON_ type values */
- u8 jnFlags; /* JNODE flags */
- u8 eU; /* Which union element to use */
- u32 n; /* Bytes of content, or number of sub-nodes */
- union {
- const char *zJContent; /* 1: Content for INT, REAL, and STRING */
- u32 iAppend; /* 2: More terms for ARRAY and OBJECT */
- u32 iKey; /* 3: Key for ARRAY objects in json_tree() */
- u32 iReplace; /* 4: Replacement content for JNODE_REPLACE */
- JsonNode *pPatch; /* 5: Node chain of patch for JNODE_PATCH */
- } u;
-};
-/* A completely parsed JSON string
+/* A parsed JSON value. Lifecycle:
+**
+** 1. JSON comes in and is parsed into a JSONB value in aBlob. The
+** original text is stored in zJson. This step is skipped if the
+** input is JSONB instead of text JSON.
+**
+** 2. The aBlob[] array is searched using the JSON path notation, if needed.
+**
+** 3. Zero or more changes are made to aBlob[] (via json_remove() or
+** json_replace() or json_patch() or similar).
+**
+** 4. New JSON text is generated from the aBlob[] for output. This step
+** is skipped if the function is one of the jsonb_* functions that
+** returns JSONB instead of text JSON.
*/
struct JsonParse {
- u32 nNode; /* Number of slots of aNode[] used */
- u32 nAlloc; /* Number of slots of aNode[] allocated */
- JsonNode *aNode; /* Array of nodes containing the parse */
- const char *zJson; /* Original JSON string */
- u32 *aUp; /* Index of parent of each node */
+ u8 *aBlob; /* JSONB representation of JSON value */
+ u32 nBlob; /* Bytes of aBlob[] actually used */
+ u32 nBlobAlloc; /* Bytes allocated to aBlob[]. 0 if aBlob is external */
+ char *zJson; /* Json text used for parsing */
+ sqlite3 *db; /* The database connection to which this object belongs */
+ int nJson; /* Length of the zJson string in bytes */
+ u32 nJPRef; /* Number of references to this object */
+ u32 iErr; /* Error location in zJson[] */
u16 iDepth; /* Nesting depth */
u8 nErr; /* Number of errors seen */
u8 oom; /* Set to true if out of memory */
+ u8 bJsonIsRCStr; /* True if zJson is an RCStr */
u8 hasNonstd; /* True if input uses non-standard features like JSON5 */
- int nJson; /* Length of the zJson string in bytes */
- u32 iErr; /* Error location in zJson[] */
- u32 iHold; /* Replace cache line with the lowest iHold value */
+ u8 bReadOnly; /* Do not modify. */
+ /* Search and edit information. See jsonLookupStep() */
+ u8 eEdit; /* Edit operation to apply */
+ int delta; /* Size change due to the edit */
+ u32 nIns; /* Number of bytes to insert */
+ u32 iLabel; /* Location of label if search landed on an object value */
+ u8 *aIns; /* Content to be inserted */
};
+/* Allowed values for JsonParse.eEdit */
+#define JEDIT_DEL 1 /* Delete if exists */
+#define JEDIT_REPL 2 /* Overwrite if exists */
+#define JEDIT_INS 3 /* Insert if not exists */
+#define JEDIT_SET 4 /* Insert or overwrite */
+
/*
** Maximum nesting depth of JSON for this implementation.
**
@@ -199870,15 +203508,151 @@ struct JsonParse {
** descent parser. A depth of 1000 is far deeper than any sane JSON
** should go. Historical note: This limit was 2000 prior to version 3.42.0
*/
-#define JSON_MAX_DEPTH 1000
+#ifndef SQLITE_JSON_MAX_DEPTH
+# define JSON_MAX_DEPTH 1000
+#else
+# define JSON_MAX_DEPTH SQLITE_JSON_MAX_DEPTH
+#endif
+
+/*
+** Allowed values for the flgs argument to jsonParseFuncArg();
+*/
+#define JSON_EDITABLE 0x01 /* Generate a writable JsonParse object */
+#define JSON_KEEPERROR 0x02 /* Return non-NULL even if there is an error */
+
+/**************************************************************************
+** Forward references
+**************************************************************************/
+static void jsonReturnStringAsBlob(JsonString*);
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson);
+static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*);
+static void jsonReturnParse(sqlite3_context*,JsonParse*);
+static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32);
+static void jsonParseFree(JsonParse*);
+static u32 jsonbPayloadSize(const JsonParse*, u32, u32*);
+static u32 jsonUnescapeOneChar(const char*, u32, u32*);
+
+/**************************************************************************
+** Utility routines for dealing with JsonCache objects
+**************************************************************************/
+
+/*
+** Free a JsonCache object.
+*/
+static void jsonCacheDelete(JsonCache *p){
+ int i;
+ for(i=0; i<p->nUsed; i++){
+ jsonParseFree(p->a[i]);
+ }
+ sqlite3DbFree(p->db, p);
+}
+static void jsonCacheDeleteGeneric(void *p){
+ jsonCacheDelete((JsonCache*)p);
+}
+
+/*
+** Insert a new entry into the cache. If the cache is full, expel
+** the least recently used entry. Return SQLITE_OK on success or a
+** result code otherwise.
+**
+** Cache entries are stored in age order, oldest first.
+*/
+static int jsonCacheInsert(
+ sqlite3_context *ctx, /* The SQL statement context holding the cache */
+ JsonParse *pParse /* The parse object to be added to the cache */
+){
+ JsonCache *p;
+
+ assert( pParse->zJson!=0 );
+ assert( pParse->bJsonIsRCStr );
+ assert( pParse->delta==0 );
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ){
+ sqlite3 *db = sqlite3_context_db_handle(ctx);
+ p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p==0 ) return SQLITE_NOMEM;
+ p->db = db;
+ sqlite3_set_auxdata(ctx, JSON_CACHE_ID, p, jsonCacheDeleteGeneric);
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ) return SQLITE_NOMEM;
+ }
+ if( p->nUsed >= JSON_CACHE_SIZE ){
+ jsonParseFree(p->a[0]);
+ memmove(p->a, &p->a[1], (JSON_CACHE_SIZE-1)*sizeof(p->a[0]));
+ p->nUsed = JSON_CACHE_SIZE-1;
+ }
+ assert( pParse->nBlobAlloc>0 );
+ pParse->eEdit = 0;
+ pParse->nJPRef++;
+ pParse->bReadOnly = 1;
+ p->a[p->nUsed] = pParse;
+ p->nUsed++;
+ return SQLITE_OK;
+}
+
+/*
+** Search for a cached translation the json text supplied by pArg. Return
+** the JsonParse object if found. Return NULL if not found.
+**
+** When a match if found, the matching entry is moved to become the
+** most-recently used entry if it isn't so already.
+**
+** The JsonParse object returned still belongs to the Cache and might
+** be deleted at any moment. If the caller whants the JsonParse to
+** linger, it needs to increment the nPJRef reference counter.
+*/
+static JsonParse *jsonCacheSearch(
+ sqlite3_context *ctx, /* The SQL statement context holding the cache */
+ sqlite3_value *pArg /* Function argument containing SQL text */
+){
+ JsonCache *p;
+ int i;
+ const char *zJson;
+ int nJson;
+
+ if( sqlite3_value_type(pArg)!=SQLITE_TEXT ){
+ return 0;
+ }
+ zJson = (const char*)sqlite3_value_text(pArg);
+ if( zJson==0 ) return 0;
+ nJson = sqlite3_value_bytes(pArg);
+
+ p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+ if( p==0 ){
+ return 0;
+ }
+ for(i=0; i<p->nUsed; i++){
+ if( p->a[i]->zJson==zJson ) break;
+ }
+ if( i>=p->nUsed ){
+ for(i=0; i<p->nUsed; i++){
+ if( p->a[i]->nJson!=nJson ) continue;
+ if( memcmp(p->a[i]->zJson, zJson, nJson)==0 ) break;
+ }
+ }
+ if( i<p->nUsed ){
+ if( i<p->nUsed-1 ){
+ /* Make the matching entry the most recently used entry */
+ JsonParse *tmp = p->a[i];
+ memmove(&p->a[i], &p->a[i+1], (p->nUsed-i-1)*sizeof(tmp));
+ p->a[p->nUsed-1] = tmp;
+ i = p->nUsed - 1;
+ }
+ assert( p->a[i]->delta==0 );
+ return p->a[i];
+ }else{
+ return 0;
+ }
+}
/**************************************************************************
** Utility routines for dealing with JsonString objects
**************************************************************************/
-/* Set the JsonString object to an empty string
+/* Turn uninitialized bulk memory into a valid JsonString object
+** holding a zero-length string.
*/
-static void jsonZero(JsonString *p){
+static void jsonStringZero(JsonString *p){
p->zBuf = p->zSpace;
p->nAlloc = sizeof(p->zSpace);
p->nUsed = 0;
@@ -199887,53 +203661,51 @@ static void jsonZero(JsonString *p){
/* Initialize the JsonString object
*/
-static void jsonInit(JsonString *p, sqlite3_context *pCtx){
+static void jsonStringInit(JsonString *p, sqlite3_context *pCtx){
p->pCtx = pCtx;
- p->bErr = 0;
- jsonZero(p);
+ p->eErr = 0;
+ jsonStringZero(p);
}
-
/* Free all allocated memory and reset the JsonString object back to its
** initial state.
*/
-static void jsonReset(JsonString *p){
- if( !p->bStatic ) sqlite3_free(p->zBuf);
- jsonZero(p);
+static void jsonStringReset(JsonString *p){
+ if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf);
+ jsonStringZero(p);
}
-
/* Report an out-of-memory (OOM) condition
*/
-static void jsonOom(JsonString *p){
- p->bErr = 1;
- sqlite3_result_error_nomem(p->pCtx);
- jsonReset(p);
+static void jsonStringOom(JsonString *p){
+ p->eErr |= JSTRING_OOM;
+ if( p->pCtx ) sqlite3_result_error_nomem(p->pCtx);
+ jsonStringReset(p);
}
/* Enlarge pJson->zBuf so that it can hold at least N more bytes.
** Return zero on success. Return non-zero on an OOM error
*/
-static int jsonGrow(JsonString *p, u32 N){
+static int jsonStringGrow(JsonString *p, u32 N){
u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
char *zNew;
if( p->bStatic ){
- if( p->bErr ) return 1;
- zNew = sqlite3_malloc64(nTotal);
+ if( p->eErr ) return 1;
+ zNew = sqlite3RCStrNew(nTotal);
if( zNew==0 ){
- jsonOom(p);
+ jsonStringOom(p);
return SQLITE_NOMEM;
}
memcpy(zNew, p->zBuf, (size_t)p->nUsed);
p->zBuf = zNew;
p->bStatic = 0;
}else{
- zNew = sqlite3_realloc64(p->zBuf, nTotal);
- if( zNew==0 ){
- jsonOom(p);
+ p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal);
+ if( p->zBuf==0 ){
+ p->eErr |= JSTRING_OOM;
+ jsonStringZero(p);
return SQLITE_NOMEM;
}
- p->zBuf = zNew;
}
p->nAlloc = nTotal;
return SQLITE_OK;
@@ -199941,18 +203713,41 @@ static int jsonGrow(JsonString *p, u32 N){
/* Append N bytes from zIn onto the end of the JsonString string.
*/
-static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
- if( N==0 ) return;
- if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return;
+static SQLITE_NOINLINE void jsonStringExpandAndAppend(
+ JsonString *p,
+ const char *zIn,
+ u32 N
+){
+ assert( N>0 );
+ if( jsonStringGrow(p,N) ) return;
memcpy(p->zBuf+p->nUsed, zIn, N);
p->nUsed += N;
}
+static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
+ if( N==0 ) return;
+ if( N+p->nUsed >= p->nAlloc ){
+ jsonStringExpandAndAppend(p,zIn,N);
+ }else{
+ memcpy(p->zBuf+p->nUsed, zIn, N);
+ p->nUsed += N;
+ }
+}
+static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){
+ assert( N>0 );
+ if( N+p->nUsed >= p->nAlloc ){
+ jsonStringExpandAndAppend(p,zIn,N);
+ }else{
+ memcpy(p->zBuf+p->nUsed, zIn, N);
+ p->nUsed += N;
+ }
+}
+
/* Append formatted text (not to exceed N bytes) to the JsonString.
*/
static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
va_list ap;
- if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
+ if( (p->nUsed + N >= p->nAlloc) && jsonStringGrow(p, N) ) return;
va_start(ap, zFormat);
sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
va_end(ap);
@@ -199961,10 +203756,38 @@ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
/* Append a single character
*/
-static void jsonAppendChar(JsonString *p, char c){
- if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return;
+static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){
+ if( jsonStringGrow(p,1) ) return;
p->zBuf[p->nUsed++] = c;
}
+static void jsonAppendChar(JsonString *p, char c){
+ if( p->nUsed>=p->nAlloc ){
+ jsonAppendCharExpand(p,c);
+ }else{
+ p->zBuf[p->nUsed++] = c;
+ }
+}
+
+/* Remove a single character from the end of the string
+*/
+static void jsonStringTrimOneChar(JsonString *p){
+ if( p->eErr==0 ){
+ assert( p->nUsed>0 );
+ p->nUsed--;
+ }
+}
+
+
+/* Make sure there is a zero terminator on p->zBuf[]
+**
+** Return true on success. Return false if an OOM prevents this
+** from happening.
+*/
+static int jsonStringTerminate(JsonString *p){
+ jsonAppendChar(p, 0);
+ jsonStringTrimOneChar(p);
+ return p->eErr==0;
+}
/* Append a comma separator to the output buffer, if the previous
** character is not '[' or '{'.
@@ -199973,25 +203796,76 @@ static void jsonAppendSeparator(JsonString *p){
char c;
if( p->nUsed==0 ) return;
c = p->zBuf[p->nUsed-1];
- if( c!='[' && c!='{' ) jsonAppendChar(p, ',');
+ if( c=='[' || c=='{' ) return;
+ jsonAppendChar(p, ',');
}
/* Append the N-byte string in zIn to the end of the JsonString string
-** under construction. Enclose the string in "..." and escape
-** any double-quotes or backslash characters contained within the
+** under construction. Enclose the string in double-quotes ("...") and
+** escape any double-quotes or backslash characters contained within the
** string.
+**
+** This routine is a high-runner. There is a measurable performance
+** increase associated with unwinding the jsonIsOk[] loop.
*/
static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return;
+ u32 k;
+ u8 c;
+ const u8 *z = (const u8*)zIn;
+ if( z==0 ) return;
+ if( (N+p->nUsed+2 >= p->nAlloc) && jsonStringGrow(p,N+2)!=0 ) return;
p->zBuf[p->nUsed++] = '"';
- for(i=0; i<N; i++){
- unsigned char c = ((unsigned const char*)zIn)[i];
+ while( 1 /*exit-by-break*/ ){
+ k = 0;
+ /* The following while() is the 4-way unwound equivalent of
+ **
+ ** while( k<N && jsonIsOk[z[k]] ){ k++; }
+ */
+ while( 1 /* Exit by break */ ){
+ if( k+3>=N ){
+ while( k<N && jsonIsOk[z[k]] ){ k++; }
+ break;
+ }
+ if( !jsonIsOk[z[k]] ){
+ break;
+ }
+ if( !jsonIsOk[z[k+1]] ){
+ k += 1;
+ break;
+ }
+ if( !jsonIsOk[z[k+2]] ){
+ k += 2;
+ break;
+ }
+ if( !jsonIsOk[z[k+3]] ){
+ k += 3;
+ break;
+ }else{
+ k += 4;
+ }
+ }
+ if( k>=N ){
+ if( k>0 ){
+ memcpy(&p->zBuf[p->nUsed], z, k);
+ p->nUsed += k;
+ }
+ break;
+ }
+ if( k>0 ){
+ memcpy(&p->zBuf[p->nUsed], z, k);
+ p->nUsed += k;
+ z += k;
+ N -= k;
+ }
+ c = z[0];
if( c=='"' || c=='\\' ){
json_simple_escape:
- if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
+ if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return;
p->zBuf[p->nUsed++] = '\\';
- }else if( c<=0x1f ){
+ p->zBuf[p->nUsed++] = c;
+ }else if( c=='\'' ){
+ p->zBuf[p->nUsed++] = c;
+ }else{
static const char aSpecial[] = {
0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -200002,158 +203876,37 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
assert( aSpecial['\n']=='n' );
assert( aSpecial['\r']=='r' );
assert( aSpecial['\t']=='t' );
+ assert( c>=0 && c<sizeof(aSpecial) );
if( aSpecial[c] ){
c = aSpecial[c];
goto json_simple_escape;
}
- if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
+ if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return;
p->zBuf[p->nUsed++] = '\\';
p->zBuf[p->nUsed++] = 'u';
p->zBuf[p->nUsed++] = '0';
p->zBuf[p->nUsed++] = '0';
- p->zBuf[p->nUsed++] = '0' + (c>>4);
- c = "0123456789abcdef"[c&0xf];
+ p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4];
+ p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf];
}
- p->zBuf[p->nUsed++] = c;
+ z++;
+ N--;
}
p->zBuf[p->nUsed++] = '"';
assert( p->nUsed<p->nAlloc );
}
/*
-** The zIn[0..N] string is a JSON5 string literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- jsonAppendChar(p, '"');
- zIn++;
- N -= 2;
- while( N>0 ){
- for(i=0; i<N && zIn[i]!='\\'; i++){}
- if( i>0 ){
- jsonAppendRaw(p, zIn, i);
- zIn += i;
- N -= i;
- if( N==0 ) break;
- }
- assert( zIn[0]=='\\' );
- switch( (u8)zIn[1] ){
- case '\'':
- jsonAppendChar(p, '\'');
- break;
- case 'v':
- jsonAppendRaw(p, "\\u0009", 6);
- break;
- case 'x':
- jsonAppendRaw(p, "\\u00", 4);
- jsonAppendRaw(p, &zIn[2], 2);
- zIn += 2;
- N -= 2;
- break;
- case '0':
- jsonAppendRaw(p, "\\u0000", 6);
- break;
- case '\r':
- if( zIn[2]=='\n' ){
- zIn++;
- N--;
- }
- break;
- case '\n':
- break;
- case 0xe2:
- assert( N>=4 );
- assert( 0x80==(u8)zIn[2] );
- assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] );
- zIn += 2;
- N -= 2;
- break;
- default:
- jsonAppendRaw(p, zIn, 2);
- break;
- }
- zIn += 2;
- N -= 2;
- }
- jsonAppendChar(p, '"');
-}
-
-/*
-** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
+** Append an sqlite3_value (such as a function parameter) to the JSON
+** string under construction in p.
*/
-static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){
- if( zIn[0]=='+' ){
- zIn++;
- N--;
- }else if( zIn[0]=='-' ){
- jsonAppendChar(p, '-');
- zIn++;
- N--;
- }
- if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){
- sqlite3_int64 i = 0;
- int rc = sqlite3DecOrHexToI64(zIn, &i);
- if( rc<=1 ){
- jsonPrintf(100,p,"%lld",i);
- }else{
- assert( rc==2 );
- jsonAppendRaw(p, "9.0e999", 7);
- }
- return;
- }
- jsonAppendRaw(p, zIn, N);
-}
-
-/*
-** The zIn[0..N] string is a JSON5 real literal. Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
-*/
-static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( zIn[0]=='+' ){
- zIn++;
- N--;
- }else if( zIn[0]=='-' ){
- jsonAppendChar(p, '-');
- zIn++;
- N--;
- }
- if( zIn[0]=='.' ){
- jsonAppendChar(p, '0');
- }
- for(i=0; i<N; i++){
- if( zIn[i]=='.' && (i+1==N || !sqlite3Isdigit(zIn[i+1])) ){
- i++;
- jsonAppendRaw(p, zIn, i);
- zIn += i;
- N -= i;
- jsonAppendChar(p, '0');
- break;
- }
- }
- if( N>0 ){
- jsonAppendRaw(p, zIn, N);
- }
-}
-
-
-
-/*
-** Append a function parameter value to the JSON string under
-** construction.
-*/
-static void jsonAppendValue(
+static void jsonAppendSqlValue(
JsonString *p, /* Append to this JSON string */
sqlite3_value *pValue /* Value to append */
){
switch( sqlite3_value_type(pValue) ){
case SQLITE_NULL: {
- jsonAppendRaw(p, "null", 4);
+ jsonAppendRawNZ(p, "null", 4);
break;
}
case SQLITE_FLOAT: {
@@ -200177,205 +203930,127 @@ static void jsonAppendValue(
break;
}
default: {
- if( p->bErr==0 ){
+ if( jsonFuncArgMightBeBinary(pValue) ){
+ JsonParse px;
+ memset(&px, 0, sizeof(px));
+ px.aBlob = (u8*)sqlite3_value_blob(pValue);
+ px.nBlob = sqlite3_value_bytes(pValue);
+ jsonTranslateBlobToText(&px, 0, p);
+ }else if( p->eErr==0 ){
sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
- p->bErr = 2;
- jsonReset(p);
+ p->eErr = JSTRING_ERR;
+ jsonStringReset(p);
}
break;
}
}
}
-
-/* Make the JSON in p the result of the SQL function.
+/* Make the text in p (which is probably a generated JSON text string)
+** the result of the SQL function.
+**
+** The JsonString is reset.
+**
+** If pParse and ctx are both non-NULL, then the SQL string in p is
+** loaded into the zJson field of the pParse object as a RCStr and the
+** pParse is added to the cache.
*/
-static void jsonResult(JsonString *p){
- if( p->bErr==0 ){
- sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
- p->bStatic ? SQLITE_TRANSIENT : sqlite3_free,
- SQLITE_UTF8);
- jsonZero(p);
+static void jsonReturnString(
+ JsonString *p, /* String to return */
+ JsonParse *pParse, /* JSONB source or NULL */
+ sqlite3_context *ctx /* Where to cache */
+){
+ assert( (pParse!=0)==(ctx!=0) );
+ assert( ctx==0 || ctx==p->pCtx );
+ if( p->eErr==0 ){
+ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(p->pCtx));
+ if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(p);
+ }else if( p->bStatic ){
+ sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ }else if( jsonStringTerminate(p) ){
+ if( pParse && pParse->bJsonIsRCStr==0 && pParse->nBlobAlloc>0 ){
+ int rc;
+ pParse->zJson = sqlite3RCStrRef(p->zBuf);
+ pParse->nJson = p->nUsed;
+ pParse->bJsonIsRCStr = 1;
+ rc = jsonCacheInsert(ctx, pParse);
+ if( rc==SQLITE_NOMEM ){
+ sqlite3_result_error_nomem(ctx);
+ jsonStringReset(p);
+ return;
+ }
+ }
+ sqlite3_result_text64(p->pCtx, sqlite3RCStrRef(p->zBuf), p->nUsed,
+ sqlite3RCStrUnref,
+ SQLITE_UTF8);
+ }else{
+ sqlite3_result_error_nomem(p->pCtx);
+ }
+ }else if( p->eErr & JSTRING_OOM ){
+ sqlite3_result_error_nomem(p->pCtx);
+ }else if( p->eErr & JSTRING_MALFORMED ){
+ sqlite3_result_error(p->pCtx, "malformed JSON", -1);
}
- assert( p->bStatic );
+ jsonStringReset(p);
}
/**************************************************************************
-** Utility routines for dealing with JsonNode and JsonParse objects
+** Utility routines for dealing with JsonParse objects
**************************************************************************/
/*
-** Return the number of consecutive JsonNode slots need to represent
-** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
-** OBJECT types, the number might be larger.
-**
-** Appended elements are not counted. The value returned is the number
-** by which the JsonNode counter should increment in order to go to the
-** next peer value.
-*/
-static u32 jsonNodeSize(JsonNode *pNode){
- return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
-}
-
-/*
** Reclaim all memory allocated by a JsonParse object. But do not
** delete the JsonParse object itself.
*/
static void jsonParseReset(JsonParse *pParse){
- sqlite3_free(pParse->aNode);
- pParse->aNode = 0;
- pParse->nNode = 0;
- pParse->nAlloc = 0;
- sqlite3_free(pParse->aUp);
- pParse->aUp = 0;
+ assert( pParse->nJPRef<=1 );
+ if( pParse->bJsonIsRCStr ){
+ sqlite3RCStrUnref(pParse->zJson);
+ pParse->zJson = 0;
+ pParse->nJson = 0;
+ pParse->bJsonIsRCStr = 0;
+ }
+ if( pParse->nBlobAlloc ){
+ sqlite3DbFree(pParse->db, pParse->aBlob);
+ pParse->aBlob = 0;
+ pParse->nBlob = 0;
+ pParse->nBlobAlloc = 0;
+ }
}
/*
-** Free a JsonParse object that was obtained from sqlite3_malloc().
+** Decrement the reference count on the JsonParse object. When the
+** count reaches zero, free the object.
*/
static void jsonParseFree(JsonParse *pParse){
- jsonParseReset(pParse);
- sqlite3_free(pParse);
-}
-
-/*
-** Convert the JsonNode pNode into a pure JSON string and
-** append to pOut. Subsubstructure is also included. Return
-** the number of JsonNode objects that are encoded.
-*/
-static void jsonRenderNode(
- JsonNode *pNode, /* The node to render */
- JsonString *pOut, /* Write JSON here */
- sqlite3_value **aReplace /* Replacement values */
-){
- assert( pNode!=0 );
- if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){
- if( (pNode->jnFlags & JNODE_REPLACE)!=0 && ALWAYS(aReplace!=0) ){
- assert( pNode->eU==4 );
- jsonAppendValue(pOut, aReplace[pNode->u.iReplace]);
- return;
- }
- assert( pNode->eU==5 );
- pNode = pNode->u.pPatch;
- }
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- jsonAppendRaw(pOut, "null", 4);
- break;
- }
- case JSON_TRUE: {
- jsonAppendRaw(pOut, "true", 4);
- break;
- }
- case JSON_FALSE: {
- jsonAppendRaw(pOut, "false", 5);
- break;
- }
- case JSON_STRING: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->jnFlags & JNODE_LABEL ){
- jsonAppendChar(pOut, '"');
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- jsonAppendChar(pOut, '"');
- }else{
- jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
- }
- }else if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n);
- }else{
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_REAL: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n);
- }else{
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_INT: {
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_JSON5 ){
- jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n);
- }else{
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- }
- break;
- }
- case JSON_ARRAY: {
- u32 j = 1;
- jsonAppendChar(pOut, '[');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- }
- j += jsonNodeSize(&pNode[j]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pNode->eU==2 );
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, ']');
- break;
- }
- case JSON_OBJECT: {
- u32 j = 1;
- jsonAppendChar(pOut, '{');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- jsonAppendChar(pOut, ':');
- jsonRenderNode(&pNode[j+1], pOut, aReplace);
- }
- j += 1 + jsonNodeSize(&pNode[j+1]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pNode->eU==2 );
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, '}');
- break;
+ if( pParse ){
+ if( pParse->nJPRef>1 ){
+ pParse->nJPRef--;
+ }else{
+ jsonParseReset(pParse);
+ sqlite3DbFree(pParse->db, pParse);
}
}
}
-/*
-** Return a JsonNode and all its descendents as a JSON string.
-*/
-static void jsonReturnJson(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- JsonString s;
- jsonInit(&s, pCtx);
- jsonRenderNode(pNode, &s, aReplace);
- jsonResult(&s);
- sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
-}
+/**************************************************************************
+** Utility routines for the JSON text parser
+**************************************************************************/
/*
** Translate a single byte of Hex into an integer.
-** This routine only works if h really is a valid hexadecimal
-** character: 0..9a..fA..F
+** This routine only gives a correct answer if h really is a valid hexadecimal
+** character: 0..9a..fA..F. But unlike sqlite3HexToInt(), it does not
+** assert() if the digit is not hex.
*/
static u8 jsonHexToInt(int h){
- assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') );
+#ifdef SQLITE_ASCII
+ h += 9*(1&(h>>6));
+#endif
#ifdef SQLITE_EBCDIC
h += 9*(1&~(h>>4));
-#else
- h += 9*(1&(h>>6));
#endif
return (u8)(h & 0xf);
}
@@ -200385,10 +204060,6 @@ static u8 jsonHexToInt(int h){
*/
static u32 jsonHexToInt4(const char *z){
u32 v;
- assert( sqlite3Isxdigit(z[0]) );
- assert( sqlite3Isxdigit(z[1]) );
- assert( sqlite3Isxdigit(z[2]) );
- assert( sqlite3Isxdigit(z[3]) );
v = (jsonHexToInt(z[0])<<12)
+ (jsonHexToInt(z[1])<<8)
+ (jsonHexToInt(z[2])<<4)
@@ -200397,227 +204068,6 @@ static u32 jsonHexToInt4(const char *z){
}
/*
-** Make the JsonNode the return value of the function.
-*/
-static void jsonReturn(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- sqlite3_result_null(pCtx);
- break;
- }
- case JSON_TRUE: {
- sqlite3_result_int(pCtx, 1);
- break;
- }
- case JSON_FALSE: {
- sqlite3_result_int(pCtx, 0);
- break;
- }
- case JSON_INT: {
- sqlite3_int64 i = 0;
- int rc;
- int bNeg = 0;
- const char *z;
-
-
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- if( z[0]=='-' ){ z++; bNeg = 1; }
- else if( z[0]=='+' ){ z++; }
- rc = sqlite3DecOrHexToI64(z, &i);
- if( rc<=1 ){
- sqlite3_result_int64(pCtx, bNeg ? -i : i);
- }else if( rc==3 && bNeg ){
- sqlite3_result_int64(pCtx, SMALLEST_INT64);
- }else{
- goto to_double;
- }
- break;
- }
- case JSON_REAL: {
- double r;
- const char *z;
- assert( pNode->eU==1 );
- to_double:
- z = pNode->u.zJContent;
- sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
- sqlite3_result_double(pCtx, r);
- break;
- }
- case JSON_STRING: {
- if( pNode->jnFlags & JNODE_RAW ){
- assert( pNode->eU==1 );
- sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
- SQLITE_TRANSIENT);
- }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
- /* JSON formatted without any backslash-escapes */
- assert( pNode->eU==1 );
- sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
- SQLITE_TRANSIENT);
- }else{
- /* Translate JSON formatted string into raw text */
- u32 i;
- u32 n = pNode->n;
- const char *z;
- char *zOut;
- u32 j;
- u32 nOut = n;
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- zOut = sqlite3_malloc( nOut+1 );
- if( zOut==0 ){
- sqlite3_result_error_nomem(pCtx);
- break;
- }
- for(i=1, j=0; i<n-1; i++){
- char c = z[i];
- if( c=='\\' ){
- c = z[++i];
- if( c=='u' ){
- u32 v = jsonHexToInt4(z+i+1);
- i += 4;
- if( v==0 ) break;
- if( v<=0x7f ){
- zOut[j++] = (char)v;
- }else if( v<=0x7ff ){
- zOut[j++] = (char)(0xc0 | (v>>6));
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- u32 vlo;
- if( (v&0xfc00)==0xd800
- && i<n-6
- && z[i+1]=='\\'
- && z[i+2]=='u'
- && ((vlo = jsonHexToInt4(z+i+3))&0xfc00)==0xdc00
- ){
- /* We have a surrogate pair */
- v = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
- i += 6;
- zOut[j++] = 0xf0 | (v>>18);
- zOut[j++] = 0x80 | ((v>>12)&0x3f);
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- zOut[j++] = 0xe0 | (v>>12);
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }
- }
- continue;
- }else if( c=='b' ){
- c = '\b';
- }else if( c=='f' ){
- c = '\f';
- }else if( c=='n' ){
- c = '\n';
- }else if( c=='r' ){
- c = '\r';
- }else if( c=='t' ){
- c = '\t';
- }else if( c=='v' ){
- c = '\v';
- }else if( c=='\'' || c=='"' || c=='/' || c=='\\' ){
- /* pass through unchanged */
- }else if( c=='0' ){
- c = 0;
- }else if( c=='x' ){
- c = (jsonHexToInt(z[i+1])<<4) | jsonHexToInt(z[i+2]);
- i += 2;
- }else if( c=='\r' && z[i+1]=='\n' ){
- i++;
- continue;
- }else if( 0xe2==(u8)c ){
- assert( 0x80==(u8)z[i+1] );
- assert( 0xa8==(u8)z[i+2] || 0xa9==(u8)z[i+2] );
- i += 2;
- continue;
- }else{
- continue;
- }
- } /* end if( c=='\\' ) */
- zOut[j++] = c;
- } /* end for() */
- zOut[j] = 0;
- sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
- }
- break;
- }
- case JSON_ARRAY:
- case JSON_OBJECT: {
- jsonReturnJson(pNode, pCtx, aReplace);
- break;
- }
- }
-}
-
-/* Forward reference */
-static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define JSON_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define JSON_NOINLINE __declspec(noinline)
-#else
-# define JSON_NOINLINE
-#endif
-
-
-static JSON_NOINLINE int jsonParseAddNodeExpand(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- u32 nNew;
- JsonNode *pNew;
- assert( pParse->nNode>=pParse->nAlloc );
- if( pParse->oom ) return -1;
- nNew = pParse->nAlloc*2 + 10;
- pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew);
- if( pNew==0 ){
- pParse->oom = 1;
- return -1;
- }
- pParse->nAlloc = nNew;
- pParse->aNode = pNew;
- assert( pParse->nNode<pParse->nAlloc );
- return jsonParseAddNode(pParse, eType, n, zContent);
-}
-
-/*
-** Create a new JsonNode instance based on the arguments and append that
-** instance to the JsonParse. Return the index in pParse->aNode[] of the
-** new node, or -1 if a memory allocation fails.
-*/
-static int jsonParseAddNode(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- JsonNode *p;
- if( pParse->aNode==0 || pParse->nNode>=pParse->nAlloc ){
- return jsonParseAddNodeExpand(pParse, eType, n, zContent);
- }
- p = &pParse->aNode[pParse->nNode];
- p->eType = (u8)(eType & 0xff);
- p->jnFlags = (u8)(eType >> 8);
- VVA( p->eU = zContent ? 1 : 0 );
- p->n = n;
- p->u.zJContent = zContent;
- return pParse->nNode++;
-}
-
-/*
** Return true if z[] begins with 2 (or more) hexadecimal digits
*/
static int jsonIs2Hex(const char *z){
@@ -200770,63 +204220,500 @@ static const struct NanInfName {
char *zMatch;
char *zRepl;
} aNanInfName[] = {
- { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" },
- { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" },
- { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" },
- { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" },
- { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" },
+ { 'i', 'I', 3, JSONB_FLOAT, 7, "inf", "9.0e999" },
+ { 'i', 'I', 8, JSONB_FLOAT, 7, "infinity", "9.0e999" },
+ { 'n', 'N', 3, JSONB_NULL, 4, "NaN", "null" },
+ { 'q', 'Q', 4, JSONB_NULL, 4, "QNaN", "null" },
+ { 's', 'S', 4, JSONB_NULL, 4, "SNaN", "null" },
};
+
/*
-** Parse a single JSON value which begins at pParse->zJson[i]. Return the
-** index of the first character past the end of the value parsed.
+** Report the wrong number of arguments for json_insert(), json_replace()
+** or json_set().
+*/
+static void jsonWrongNumArgs(
+ sqlite3_context *pCtx,
+ const char *zFuncName
+){
+ char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
+ zFuncName);
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+}
+
+/****************************************************************************
+** Utility routines for dealing with the binary BLOB representation of JSON
+****************************************************************************/
+
+/*
+** Expand pParse->aBlob so that it holds at least N bytes.
**
-** Special return values:
+** Return the number of errors.
+*/
+static int jsonBlobExpand(JsonParse *pParse, u32 N){
+ u8 *aNew;
+ u32 t;
+ assert( N>pParse->nBlobAlloc );
+ if( pParse->nBlobAlloc==0 ){
+ t = 100;
+ }else{
+ t = pParse->nBlobAlloc*2;
+ }
+ if( t<N ) t = N+100;
+ aNew = sqlite3DbRealloc(pParse->db, pParse->aBlob, t);
+ if( aNew==0 ){ pParse->oom = 1; return 1; }
+ pParse->aBlob = aNew;
+ pParse->nBlobAlloc = t;
+ return 0;
+}
+
+/*
+** If pParse->aBlob is not previously editable (because it is taken
+** from sqlite3_value_blob(), as indicated by the fact that
+** pParse->nBlobAlloc==0 and pParse->nBlob>0) then make it editable
+** by making a copy into space obtained from malloc.
**
-** 0 End if input
-** -1 Syntax error
-** -2 '}' seen
-** -3 ']' seen
-** -4 ',' seen
-** -5 ':' seen
+** Return true on success. Return false on OOM.
*/
-static int jsonParseValue(JsonParse *pParse, u32 i){
+static int jsonBlobMakeEditable(JsonParse *pParse, u32 nExtra){
+ u8 *aOld;
+ u32 nSize;
+ assert( !pParse->bReadOnly );
+ if( pParse->oom ) return 0;
+ if( pParse->nBlobAlloc>0 ) return 1;
+ aOld = pParse->aBlob;
+ nSize = pParse->nBlob + nExtra;
+ pParse->aBlob = 0;
+ if( jsonBlobExpand(pParse, nSize) ){
+ return 0;
+ }
+ assert( pParse->nBlobAlloc >= pParse->nBlob + nExtra );
+ memcpy(pParse->aBlob, aOld, pParse->nBlob);
+ return 1;
+}
+
+/* Expand pParse->aBlob and append one bytes.
+*/
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendOneByte(
+ JsonParse *pParse,
+ u8 c
+){
+ jsonBlobExpand(pParse, pParse->nBlob+1);
+ if( pParse->oom==0 ){
+ assert( pParse->nBlob+1<=pParse->nBlobAlloc );
+ pParse->aBlob[pParse->nBlob++] = c;
+ }
+}
+
+/* Append a single character.
+*/
+static void jsonBlobAppendOneByte(JsonParse *pParse, u8 c){
+ if( pParse->nBlob >= pParse->nBlobAlloc ){
+ jsonBlobExpandAndAppendOneByte(pParse, c);
+ }else{
+ pParse->aBlob[pParse->nBlob++] = c;
+ }
+}
+
+/* Slow version of jsonBlobAppendNode() that first resizes the
+** pParse->aBlob structure.
+*/
+static void jsonBlobAppendNode(JsonParse*,u8,u32,const void*);
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode(
+ JsonParse *pParse,
+ u8 eType,
+ u32 szPayload,
+ const void *aPayload
+){
+ if( jsonBlobExpand(pParse, pParse->nBlob+szPayload+9) ) return;
+ jsonBlobAppendNode(pParse, eType, szPayload, aPayload);
+}
+
+
+/* Append an node type byte together with the payload size and
+** possibly also the payload.
+**
+** If aPayload is not NULL, then it is a pointer to the payload which
+** is also appended. If aPayload is NULL, the pParse->aBlob[] array
+** is resized (if necessary) so that it is big enough to hold the
+** payload, but the payload is not appended and pParse->nBlob is left
+** pointing to where the first byte of payload will eventually be.
+*/
+static void jsonBlobAppendNode(
+ JsonParse *pParse, /* The JsonParse object under construction */
+ u8 eType, /* Node type. One of JSONB_* */
+ u32 szPayload, /* Number of bytes of payload */
+ const void *aPayload /* The payload. Might be NULL */
+){
+ u8 *a;
+ if( pParse->nBlob+szPayload+9 > pParse->nBlobAlloc ){
+ jsonBlobExpandAndAppendNode(pParse,eType,szPayload,aPayload);
+ return;
+ }
+ assert( pParse->aBlob!=0 );
+ a = &pParse->aBlob[pParse->nBlob];
+ if( szPayload<=11 ){
+ a[0] = eType | (szPayload<<4);
+ pParse->nBlob += 1;
+ }else if( szPayload<=0xff ){
+ a[0] = eType | 0xc0;
+ a[1] = szPayload & 0xff;
+ pParse->nBlob += 2;
+ }else if( szPayload<=0xffff ){
+ a[0] = eType | 0xd0;
+ a[1] = (szPayload >> 8) & 0xff;
+ a[2] = szPayload & 0xff;
+ pParse->nBlob += 3;
+ }else{
+ a[0] = eType | 0xe0;
+ a[1] = (szPayload >> 24) & 0xff;
+ a[2] = (szPayload >> 16) & 0xff;
+ a[3] = (szPayload >> 8) & 0xff;
+ a[4] = szPayload & 0xff;
+ pParse->nBlob += 5;
+ }
+ if( aPayload ){
+ pParse->nBlob += szPayload;
+ memcpy(&pParse->aBlob[pParse->nBlob-szPayload], aPayload, szPayload);
+ }
+}
+
+/* Change the payload size for the node at index i to be szPayload.
+*/
+static int jsonBlobChangePayloadSize(
+ JsonParse *pParse,
+ u32 i,
+ u32 szPayload
+){
+ u8 *a;
+ u8 szType;
+ u8 nExtra;
+ u8 nNeeded;
+ int delta;
+ if( pParse->oom ) return 0;
+ a = &pParse->aBlob[i];
+ szType = a[0]>>4;
+ if( szType<=11 ){
+ nExtra = 0;
+ }else if( szType==12 ){
+ nExtra = 1;
+ }else if( szType==13 ){
+ nExtra = 2;
+ }else{
+ nExtra = 4;
+ }
+ if( szPayload<=11 ){
+ nNeeded = 0;
+ }else if( szPayload<=0xff ){
+ nNeeded = 1;
+ }else if( szPayload<=0xffff ){
+ nNeeded = 2;
+ }else{
+ nNeeded = 4;
+ }
+ delta = nNeeded - nExtra;
+ if( delta ){
+ u32 newSize = pParse->nBlob + delta;
+ if( delta>0 ){
+ if( newSize>pParse->nBlobAlloc && jsonBlobExpand(pParse, newSize) ){
+ return 0; /* OOM error. Error state recorded in pParse->oom. */
+ }
+ a = &pParse->aBlob[i];
+ memmove(&a[1+delta], &a[1], pParse->nBlob - (i+1));
+ }else{
+ memmove(&a[1], &a[1-delta], pParse->nBlob - (i+1-delta));
+ }
+ pParse->nBlob = newSize;
+ }
+ if( nNeeded==0 ){
+ a[0] = (a[0] & 0x0f) | (szPayload<<4);
+ }else if( nNeeded==1 ){
+ a[0] = (a[0] & 0x0f) | 0xc0;
+ a[1] = szPayload & 0xff;
+ }else if( nNeeded==2 ){
+ a[0] = (a[0] & 0x0f) | 0xd0;
+ a[1] = (szPayload >> 8) & 0xff;
+ a[2] = szPayload & 0xff;
+ }else{
+ a[0] = (a[0] & 0x0f) | 0xe0;
+ a[1] = (szPayload >> 24) & 0xff;
+ a[2] = (szPayload >> 16) & 0xff;
+ a[3] = (szPayload >> 8) & 0xff;
+ a[4] = szPayload & 0xff;
+ }
+ return delta;
+}
+
+/*
+** If z[0] is 'u' and is followed by exactly 4 hexadecimal character,
+** then set *pOp to JSONB_TEXTJ and return true. If not, do not make
+** any changes to *pOp and return false.
+*/
+static int jsonIs4HexB(const char *z, int *pOp){
+ if( z[0]!='u' ) return 0;
+ if( !jsonIs4Hex(&z[1]) ) return 0;
+ *pOp = JSONB_TEXTJ;
+ return 1;
+}
+
+/*
+** Check a single element of the JSONB in pParse for validity.
+**
+** The element to be checked starts at offset i and must end at on the
+** last byte before iEnd.
+**
+** Return 0 if everything is correct. Return the 1-based byte offset of the
+** error if a problem is detected. (In other words, if the error is at offset
+** 0, return 1).
+*/
+static u32 jsonbValidityCheck(
+ const JsonParse *pParse, /* Input JSONB. Only aBlob and nBlob are used */
+ u32 i, /* Start of element as pParse->aBlob[i] */
+ u32 iEnd, /* One more than the last byte of the element */
+ u32 iDepth /* Current nesting depth */
+){
+ u32 n, sz, j, k;
+ const u8 *z;
+ u8 x;
+ if( iDepth>JSON_MAX_DEPTH ) return i+1;
+ sz = 0;
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( NEVER(n==0) ) return i+1; /* Checked by caller */
+ if( NEVER(i+n+sz!=iEnd) ) return i+1; /* Checked by caller */
+ z = pParse->aBlob;
+ x = z[i] & 0x0f;
+ switch( x ){
+ case JSONB_NULL:
+ case JSONB_TRUE:
+ case JSONB_FALSE: {
+ return n+sz==1 ? 0 : i+1;
+ }
+ case JSONB_INT: {
+ if( sz<1 ) return i+1;
+ j = i+n;
+ if( z[j]=='-' ){
+ j++;
+ if( sz<2 ) return i+1;
+ }
+ k = i+n+sz;
+ while( j<k ){
+ if( sqlite3Isdigit(z[j]) ){
+ j++;
+ }else{
+ return j+1;
+ }
+ }
+ return 0;
+ }
+ case JSONB_INT5: {
+ if( sz<3 ) return i+1;
+ j = i+n;
+ if( z[j]=='-' ){
+ if( sz<4 ) return i+1;
+ j++;
+ }
+ if( z[j]!='0' ) return i+1;
+ if( z[j+1]!='x' && z[j+1]!='X' ) return j+2;
+ j += 2;
+ k = i+n+sz;
+ while( j<k ){
+ if( sqlite3Isxdigit(z[j]) ){
+ j++;
+ }else{
+ return j+1;
+ }
+ }
+ return 0;
+ }
+ case JSONB_FLOAT:
+ case JSONB_FLOAT5: {
+ u8 seen = 0; /* 0: initial. 1: '.' seen 2: 'e' seen */
+ if( sz<2 ) return i+1;
+ j = i+n;
+ k = j+sz;
+ if( z[j]=='-' ){
+ j++;
+ if( sz<3 ) return i+1;
+ }
+ if( z[j]=='.' ){
+ if( x==JSONB_FLOAT ) return j+1;
+ if( !sqlite3Isdigit(z[j+1]) ) return j+1;
+ j += 2;
+ seen = 1;
+ }else if( z[j]=='0' && x==JSONB_FLOAT ){
+ if( j+3>k ) return j+1;
+ if( z[j+1]!='.' && z[j+1]!='e' && z[j+1]!='E' ) return j+1;
+ j++;
+ }
+ for(; j<k; j++){
+ if( sqlite3Isdigit(z[j]) ) continue;
+ if( z[j]=='.' ){
+ if( seen>0 ) return j+1;
+ if( x==JSONB_FLOAT && (j==k-1 || !sqlite3Isdigit(z[j+1])) ){
+ return j+1;
+ }
+ seen = 1;
+ continue;
+ }
+ if( z[j]=='e' || z[j]=='E' ){
+ if( seen==2 ) return j+1;
+ if( j==k-1 ) return j+1;
+ if( z[j+1]=='+' || z[j+1]=='-' ){
+ j++;
+ if( j==k-1 ) return j+1;
+ }
+ seen = 2;
+ continue;
+ }
+ return j+1;
+ }
+ if( seen==0 ) return i+1;
+ return 0;
+ }
+ case JSONB_TEXT: {
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ if( !jsonIsOk[z[j]] && z[j]!='\'' ) return j+1;
+ j++;
+ }
+ return 0;
+ }
+ case JSONB_TEXTJ:
+ case JSONB_TEXT5: {
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ if( !jsonIsOk[z[j]] && z[j]!='\'' ){
+ if( z[j]=='"' ){
+ if( x==JSONB_TEXTJ ) return j+1;
+ }else if( z[j]!='\\' || j+1>=k ){
+ return j+1;
+ }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){
+ j++;
+ }else if( z[j+1]=='u' ){
+ if( j+5>=k ) return j+1;
+ if( !jsonIs4Hex((const char*)&z[j+2]) ) return j+1;
+ j++;
+ }else if( x!=JSONB_TEXT5 ){
+ return j+1;
+ }else{
+ u32 c = 0;
+ u32 szC = jsonUnescapeOneChar((const char*)&z[j], k-j, &c);
+ if( c==JSON_INVALID_CHAR ) return j+1;
+ j += szC - 1;
+ }
+ }
+ j++;
+ }
+ return 0;
+ }
+ case JSONB_TEXTRAW: {
+ return 0;
+ }
+ case JSONB_ARRAY: {
+ u32 sub;
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ sz = 0;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return j+1;
+ if( j+n+sz>k ) return j+1;
+ sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+ if( sub ) return sub;
+ j += n + sz;
+ }
+ assert( j==k );
+ return 0;
+ }
+ case JSONB_OBJECT: {
+ u32 cnt = 0;
+ u32 sub;
+ j = i+n;
+ k = j+sz;
+ while( j<k ){
+ sz = 0;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return j+1;
+ if( j+n+sz>k ) return j+1;
+ if( (cnt & 1)==0 ){
+ x = z[j] & 0x0f;
+ if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return j+1;
+ }
+ sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+ if( sub ) return sub;
+ cnt++;
+ j += n + sz;
+ }
+ assert( j==k );
+ if( (cnt & 1)!=0 ) return j+1;
+ return 0;
+ }
+ default: {
+ return i+1;
+ }
+ }
+}
+
+/*
+** Translate a single element of JSON text at pParse->zJson[i] into
+** its equivalent binary JSONB representation. Append the translation into
+** pParse->aBlob[] beginning at pParse->nBlob. The size of
+** pParse->aBlob[] is increased as necessary.
+**
+** Return the index of the first character past the end of the element parsed,
+** or one of the following special result codes:
+**
+** 0 End of input
+** -1 Syntax error or OOM
+** -2 '}' seen \
+** -3 ']' seen \___ For these returns, pParse->iErr is set to
+** -4 ',' seen / the index in zJson[] of the seen character
+** -5 ':' seen /
+*/
+static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){
char c;
u32 j;
- int iThis;
+ u32 iThis, iStart;
int x;
- JsonNode *pNode;
+ u8 t;
const char *z = pParse->zJson;
json_parse_restart:
switch( (u8)z[i] ){
case '{': {
/* Parse object */
- iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- if( iThis<0 ) return -1;
+ iThis = pParse->nBlob;
+ jsonBlobAppendNode(pParse, JSONB_OBJECT, pParse->nJson-i, 0);
if( ++pParse->iDepth > JSON_MAX_DEPTH ){
pParse->iErr = i;
return -1;
}
+ iStart = pParse->nBlob;
for(j=i+1;;j++){
- u32 nNode = pParse->nNode;
- x = jsonParseValue(pParse, j);
+ u32 iBlob = pParse->nBlob;
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
+ int op;
if( x==(-2) ){
j = pParse->iErr;
- if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+ if( pParse->nBlob!=(u32)iStart ) pParse->hasNonstd = 1;
break;
}
j += json5Whitespace(&z[j]);
+ op = JSONB_TEXT;
if( sqlite3JsonId1(z[j])
- || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2]))
+ || (z[j]=='\\' && jsonIs4HexB(&z[j+1], &op))
){
int k = j+1;
while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0)
- || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2]))
+ || (z[k]=='\\' && jsonIs4HexB(&z[k+1], &op))
){
k++;
}
- jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]);
+ assert( iBlob==pParse->nBlob );
+ jsonBlobAppendNode(pParse, op, k-j, &z[j]);
pParse->hasNonstd = 1;
x = k;
}else{
@@ -200835,24 +204722,24 @@ json_parse_restart:
}
}
if( pParse->oom ) return -1;
- pNode = &pParse->aNode[nNode];
- if( pNode->eType!=JSON_STRING ){
+ t = pParse->aBlob[iBlob] & 0x0f;
+ if( t<JSONB_TEXT || t>JSONB_TEXTRAW ){
pParse->iErr = j;
return -1;
}
- pNode->jnFlags |= JNODE_LABEL;
j = x;
if( z[j]==':' ){
j++;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ /* strspn() is not helpful here */
+ do{ j++; }while( jsonIsspace(z[j]) );
if( z[j]==':' ){
j++;
goto parse_object_value;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x!=(-5) ){
if( x!=(-1) ) pParse->iErr = j;
return -1;
@@ -200860,7 +204747,7 @@ json_parse_restart:
j = pParse->iErr+1;
}
parse_object_value:
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
if( x!=(-1) ) pParse->iErr = j;
return -1;
@@ -200871,15 +204758,15 @@ json_parse_restart:
}else if( z[j]=='}' ){
break;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
if( z[j]==',' ){
continue;
}else if( z[j]=='}' ){
break;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x==(-4) ){
j = pParse->iErr;
continue;
@@ -200892,25 +204779,26 @@ json_parse_restart:
pParse->iErr = j;
return -1;
}
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
pParse->iDepth--;
return j+1;
}
case '[': {
/* Parse array */
- iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- if( iThis<0 ) return -1;
+ iThis = pParse->nBlob;
+ jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0);
+ iStart = pParse->nBlob;
+ if( pParse->oom ) return -1;
if( ++pParse->iDepth > JSON_MAX_DEPTH ){
pParse->iErr = i;
return -1;
}
- memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u));
for(j=i+1;;j++){
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x<=0 ){
if( x==(-3) ){
j = pParse->iErr;
- if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+ if( pParse->nBlob!=iStart ) pParse->hasNonstd = 1;
break;
}
if( x!=(-1) ) pParse->iErr = j;
@@ -200922,15 +204810,15 @@ json_parse_restart:
}else if( z[j]==']' ){
break;
}else{
- if( fast_isspace(z[j]) ){
- do{ j++; }while( fast_isspace(z[j]) );
+ if( jsonIsspace(z[j]) ){
+ j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
if( z[j]==',' ){
continue;
}else if( z[j]==']' ){
break;
}
}
- x = jsonParseValue(pParse, j);
+ x = jsonTranslateTextToBlob(pParse, j);
if( x==(-4) ){
j = pParse->iErr;
continue;
@@ -200943,60 +204831,71 @@ json_parse_restart:
pParse->iErr = j;
return -1;
}
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
pParse->iDepth--;
return j+1;
}
case '\'': {
- u8 jnFlags;
+ u8 opcode;
char cDelim;
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ opcode = JSONB_TEXT;
goto parse_string;
case '"':
/* Parse string */
- jnFlags = 0;
+ opcode = JSONB_TEXT;
parse_string:
cDelim = z[i];
j = i+1;
- for(;;){
- c = z[j];
- if( (c & ~0x1f)==0 ){
- /* Control characters are not allowed in strings */
- pParse->iErr = j;
- return -1;
+ while( 1 /*exit-by-break*/ ){
+ if( jsonIsOk[(u8)z[j]] ){
+ if( !jsonIsOk[(u8)z[j+1]] ){
+ j += 1;
+ }else if( !jsonIsOk[(u8)z[j+2]] ){
+ j += 2;
+ }else{
+ j += 3;
+ continue;
+ }
}
- if( c=='\\' ){
+ c = z[j];
+ if( c==cDelim ){
+ break;
+ }else if( c=='\\' ){
c = z[++j];
if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
|| c=='n' || c=='r' || c=='t'
|| (c=='u' && jsonIs4Hex(&z[j+1])) ){
- jnFlags |= JNODE_ESCAPE;
+ if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ;
}else if( c=='\'' || c=='0' || c=='v' || c=='\n'
|| (0xe2==(u8)c && 0x80==(u8)z[j+1]
&& (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2]))
|| (c=='x' && jsonIs2Hex(&z[j+1])) ){
- jnFlags |= (JNODE_ESCAPE|JNODE_JSON5);
+ opcode = JSONB_TEXT5;
pParse->hasNonstd = 1;
}else if( c=='\r' ){
if( z[j+1]=='\n' ) j++;
- jnFlags |= (JNODE_ESCAPE|JNODE_JSON5);
+ opcode = JSONB_TEXT5;
pParse->hasNonstd = 1;
}else{
pParse->iErr = j;
return -1;
}
- }else if( c==cDelim ){
- break;
+ }else if( c<=0x1f ){
+ /* Control characters are not allowed in strings */
+ pParse->iErr = j;
+ return -1;
+ }else if( c=='"' ){
+ opcode = JSONB_TEXT5;
}
j++;
}
- jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]);
+ jsonBlobAppendNode(pParse, opcode, j-1-i, &z[i+1]);
return j+1;
}
case 't': {
if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_TRUE);
return i+4;
}
pParse->iErr = i;
@@ -201004,23 +204903,22 @@ json_parse_restart:
}
case 'f': {
if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){
- jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_FALSE);
return i+5;
}
pParse->iErr = i;
return -1;
}
case '+': {
- u8 seenDP, seenE, jnFlags;
+ u8 seenE;
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
goto parse_number;
case '.':
if( sqlite3Isdigit(z[i+1]) ){
pParse->hasNonstd = 1;
- jnFlags = JNODE_JSON5;
+ t = 0x03; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
seenE = 0;
- seenDP = JSON_REAL;
goto parse_number_2;
}
pParse->iErr = i;
@@ -201037,9 +204935,8 @@ json_parse_restart:
case '8':
case '9':
/* Parse number */
- jnFlags = 0;
+ t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */
parse_number:
- seenDP = JSON_INT;
seenE = 0;
assert( '-' < '0' );
assert( '+' < '0' );
@@ -201049,9 +204946,9 @@ json_parse_restart:
if( c<='0' ){
if( c=='0' ){
if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){
- assert( seenDP==JSON_INT );
+ assert( t==0x00 );
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t = 0x01;
for(j=i+3; sqlite3Isxdigit(z[j]); j++){}
goto parse_number_finish;
}else if( sqlite3Isdigit(z[i+1]) ){
@@ -201068,15 +204965,15 @@ json_parse_restart:
){
pParse->hasNonstd = 1;
if( z[i]=='-' ){
- jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999");
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999");
}else{
- jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999");
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
}
return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4);
}
if( z[i+1]=='.' ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
goto parse_number_2;
}
pParse->iErr = i;
@@ -201088,30 +204985,31 @@ json_parse_restart:
return -1;
}else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
for(j=i+4; sqlite3Isxdigit(z[j]); j++){}
goto parse_number_finish;
}
}
}
}
+
parse_number_2:
for(j=i+1;; j++){
c = z[j];
if( sqlite3Isdigit(c) ) continue;
if( c=='.' ){
- if( seenDP==JSON_REAL ){
+ if( (t & 0x02)!=0 ){
pParse->iErr = j;
return -1;
}
- seenDP = JSON_REAL;
+ t |= 0x02;
continue;
}
if( c=='e' || c=='E' ){
if( z[j-1]<'0' ){
if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
}else{
pParse->iErr = j;
return -1;
@@ -201121,7 +205019,7 @@ json_parse_restart:
pParse->iErr = j;
return -1;
}
- seenDP = JSON_REAL;
+ t |= 0x02;
seenE = 1;
c = z[j+1];
if( c=='+' || c=='-' ){
@@ -201139,14 +205037,18 @@ json_parse_restart:
if( z[j-1]<'0' ){
if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){
pParse->hasNonstd = 1;
- jnFlags |= JNODE_JSON5;
+ t |= 0x01;
}else{
pParse->iErr = j;
return -1;
}
}
parse_number_finish:
- jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]);
+ assert( JSONB_INT+0x01==JSONB_INT5 );
+ assert( JSONB_FLOAT+0x01==JSONB_FLOAT5 );
+ assert( JSONB_INT+0x02==JSONB_FLOAT );
+ if( z[i]=='+' ) i++;
+ jsonBlobAppendNode(pParse, JSONB_INT+t, j-i, &z[i]);
return j;
}
case '}': {
@@ -201172,9 +205074,7 @@ json_parse_restart:
case 0x0a:
case 0x0d:
case 0x20: {
- do{
- i++;
- }while( fast_isspace(z[i]) );
+ i += 1 + (u32)strspn(&z[i+1], jsonSpaces);
goto json_parse_restart;
}
case 0x0b:
@@ -201196,7 +205096,7 @@ json_parse_restart:
}
case 'n': {
if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ jsonBlobAppendOneByte(pParse, JSONB_NULL);
return i+4;
}
/* fall-through into the default case that checks for NaN */
@@ -201212,8 +205112,11 @@ json_parse_restart:
continue;
}
if( sqlite3Isalnum(z[i+nn]) ) continue;
- jsonParseAddNode(pParse, aNanInfName[k].eType,
- aNanInfName[k].nRepl, aNanInfName[k].zRepl);
+ if( aNanInfName[k].eType==JSONB_FLOAT ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
+ }else{
+ jsonBlobAppendOneByte(pParse, JSONB_NULL);
+ }
pParse->hasNonstd = 1;
return i + nn;
}
@@ -201223,30 +205126,35 @@ json_parse_restart:
} /* End switch(z[i]) */
}
+
/*
** Parse a complete JSON string. Return 0 on success or non-zero if there
-** are any errors. If an error occurs, free all memory associated with
-** pParse.
+** are any errors. If an error occurs, free all memory held by pParse,
+** but not pParse itself.
**
-** pParse is uninitialized when this routine is called.
+** pParse must be initialized to an empty parse object prior to calling
+** this routine.
*/
-static int jsonParse(
+static int jsonConvertTextToBlob(
JsonParse *pParse, /* Initialize and fill this JsonParse object */
- sqlite3_context *pCtx, /* Report errors here */
- const char *zJson /* Input JSON text to be parsed */
+ sqlite3_context *pCtx /* Report errors here */
){
int i;
- memset(pParse, 0, sizeof(*pParse));
- if( zJson==0 ) return 1;
- pParse->zJson = zJson;
- i = jsonParseValue(pParse, 0);
+ const char *zJson = pParse->zJson;
+ i = jsonTranslateTextToBlob(pParse, 0);
if( pParse->oom ) i = -1;
if( i>0 ){
+#ifdef SQLITE_DEBUG
assert( pParse->iDepth==0 );
- while( fast_isspace(zJson[i]) ) i++;
+ if( sqlite3Config.bJsonSelfcheck ){
+ assert( jsonbValidityCheck(pParse, 0, pParse->nBlob, 0)==0 );
+ }
+#endif
+ while( jsonIsspace(zJson[i]) ) i++;
if( zJson[i] ){
i += json5Whitespace(&zJson[i]);
if( zJson[i] ){
+ if( pCtx ) sqlite3_result_error(pCtx, "malformed JSON", -1);
jsonParseReset(pParse);
return 1;
}
@@ -201267,183 +205175,715 @@ static int jsonParse(
return 0;
}
-/* Mark node i of pParse as being a child of iParent. Call recursively
-** to fill in all the descendants of node i.
+/*
+** The input string pStr is a well-formed JSON text string. Convert
+** this into the JSONB format and make it the return value of the
+** SQL function.
*/
-static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
- JsonNode *pNode = &pParse->aNode[i];
- u32 j;
- pParse->aUp[i] = iParent;
- switch( pNode->eType ){
- case JSON_ARRAY: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
- jsonParseFillInParentage(pParse, i+j, i);
+static void jsonReturnStringAsBlob(JsonString *pStr){
+ JsonParse px;
+ memset(&px, 0, sizeof(px));
+ jsonStringTerminate(pStr);
+ px.zJson = pStr->zBuf;
+ px.nJson = pStr->nUsed;
+ px.db = sqlite3_context_db_handle(pStr->pCtx);
+ (void)jsonTranslateTextToBlob(&px, 0);
+ if( px.oom ){
+ sqlite3DbFree(px.db, px.aBlob);
+ sqlite3_result_error_nomem(pStr->pCtx);
+ }else{
+ assert( px.nBlobAlloc>0 );
+ assert( !px.bReadOnly );
+ sqlite3_result_blob(pStr->pCtx, px.aBlob, px.nBlob, SQLITE_DYNAMIC);
+ }
+}
+
+/* The byte at index i is a node type-code. This routine
+** determines the payload size for that node and writes that
+** payload size in to *pSz. It returns the offset from i to the
+** beginning of the payload. Return 0 on error.
+*/
+static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){
+ u8 x;
+ u32 sz;
+ u32 n;
+ if( NEVER(i>pParse->nBlob) ){
+ *pSz = 0;
+ return 0;
+ }
+ x = pParse->aBlob[i]>>4;
+ if( x<=11 ){
+ sz = x;
+ n = 1;
+ }else if( x==12 ){
+ if( i+1>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = pParse->aBlob[i+1];
+ n = 2;
+ }else if( x==13 ){
+ if( i+2>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = (pParse->aBlob[i+1]<<8) + pParse->aBlob[i+2];
+ n = 3;
+ }else if( x==14 ){
+ if( i+4>=pParse->nBlob ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = ((u32)pParse->aBlob[i+1]<<24) + (pParse->aBlob[i+2]<<16) +
+ (pParse->aBlob[i+3]<<8) + pParse->aBlob[i+4];
+ n = 5;
+ }else{
+ if( i+8>=pParse->nBlob
+ || pParse->aBlob[i+1]!=0
+ || pParse->aBlob[i+2]!=0
+ || pParse->aBlob[i+3]!=0
+ || pParse->aBlob[i+4]!=0
+ ){
+ *pSz = 0;
+ return 0;
+ }
+ sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) +
+ (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8];
+ n = 9;
+ }
+ if( (i64)i+sz+n > pParse->nBlob
+ && (i64)i+sz+n > pParse->nBlob-pParse->delta
+ ){
+ sz = 0;
+ n = 0;
+ }
+ *pSz = sz;
+ return n;
+}
+
+
+/*
+** Translate the binary JSONB representation of JSON beginning at
+** pParse->aBlob[i] into a JSON text string. Append the JSON
+** text onto the end of pOut. Return the index in pParse->aBlob[]
+** of the first byte past the end of the element that is translated.
+**
+** If an error is detected in the BLOB input, the pOut->eErr flag
+** might get set to JSTRING_MALFORMED. But not all BLOB input errors
+** are detected. So a malformed JSONB input might either result
+** in an error, or in incorrect JSON.
+**
+** The pOut->eErr JSTRING_OOM flag is set on a OOM.
+*/
+static u32 jsonTranslateBlobToText(
+ const JsonParse *pParse, /* the complete parse of the JSON */
+ u32 i, /* Start rendering at this index */
+ JsonString *pOut /* Write JSON here */
+){
+ u32 sz, n, j, iEnd;
+
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( n==0 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ return pParse->nBlob+1;
+ }
+ switch( pParse->aBlob[i] & 0x0f ){
+ case JSONB_NULL: {
+ jsonAppendRawNZ(pOut, "null", 4);
+ return i+1;
+ }
+ case JSONB_TRUE: {
+ jsonAppendRawNZ(pOut, "true", 4);
+ return i+1;
+ }
+ case JSONB_FALSE: {
+ jsonAppendRawNZ(pOut, "false", 5);
+ return i+1;
+ }
+ case JSONB_INT:
+ case JSONB_FLOAT: {
+ if( sz==0 ) goto malformed_jsonb;
+ jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ break;
+ }
+ case JSONB_INT5: { /* Integer literal in hexadecimal notation */
+ u32 k = 2;
+ sqlite3_uint64 u = 0;
+ const char *zIn = (const char*)&pParse->aBlob[i+n];
+ int bOverflow = 0;
+ if( sz==0 ) goto malformed_jsonb;
+ if( zIn[0]=='-' ){
+ jsonAppendChar(pOut, '-');
+ k++;
+ }else if( zIn[0]=='+' ){
+ k++;
+ }
+ for(; k<sz; k++){
+ if( !sqlite3Isxdigit(zIn[k]) ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }else if( (u>>60)!=0 ){
+ bOverflow = 1;
+ }else{
+ u = u*16 + sqlite3HexToInt(zIn[k]);
+ }
}
+ jsonPrintf(100,pOut,bOverflow?"9.0e999":"%llu", u);
break;
}
- case JSON_OBJECT: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
- pParse->aUp[i+j] = i;
- jsonParseFillInParentage(pParse, i+j+1, i);
+ case JSONB_FLOAT5: { /* Float literal missing digits beside "." */
+ u32 k = 0;
+ const char *zIn = (const char*)&pParse->aBlob[i+n];
+ if( sz==0 ) goto malformed_jsonb;
+ if( zIn[0]=='-' ){
+ jsonAppendChar(pOut, '-');
+ k++;
+ }
+ if( zIn[k]=='.' ){
+ jsonAppendChar(pOut, '0');
+ }
+ for(; k<sz; k++){
+ jsonAppendChar(pOut, zIn[k]);
+ if( zIn[k]=='.' && (k+1==sz || !sqlite3Isdigit(zIn[k+1])) ){
+ jsonAppendChar(pOut, '0');
+ }
+ }
+ break;
+ }
+ case JSONB_TEXT:
+ case JSONB_TEXTJ: {
+ jsonAppendChar(pOut, '"');
+ jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ jsonAppendChar(pOut, '"');
+ break;
+ }
+ case JSONB_TEXT5: {
+ const char *zIn;
+ u32 k;
+ u32 sz2 = sz;
+ zIn = (const char*)&pParse->aBlob[i+n];
+ jsonAppendChar(pOut, '"');
+ while( sz2>0 ){
+ for(k=0; k<sz2 && zIn[k]!='\\' && zIn[k]!='"'; k++){}
+ if( k>0 ){
+ jsonAppendRawNZ(pOut, zIn, k);
+ if( k>=sz2 ){
+ break;
+ }
+ zIn += k;
+ sz2 -= k;
+ }
+ if( zIn[0]=='"' ){
+ jsonAppendRawNZ(pOut, "\\\"", 2);
+ zIn++;
+ sz2--;
+ continue;
+ }
+ assert( zIn[0]=='\\' );
+ assert( sz2>=1 );
+ if( sz2<2 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ break;
+ }
+ switch( (u8)zIn[1] ){
+ case '\'':
+ jsonAppendChar(pOut, '\'');
+ break;
+ case 'v':
+ jsonAppendRawNZ(pOut, "\\u0009", 6);
+ break;
+ case 'x':
+ if( sz2<4 ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ sz2 = 2;
+ break;
+ }
+ jsonAppendRawNZ(pOut, "\\u00", 4);
+ jsonAppendRawNZ(pOut, &zIn[2], 2);
+ zIn += 2;
+ sz2 -= 2;
+ break;
+ case '0':
+ jsonAppendRawNZ(pOut, "\\u0000", 6);
+ break;
+ case '\r':
+ if( sz2>2 && zIn[2]=='\n' ){
+ zIn++;
+ sz2--;
+ }
+ break;
+ case '\n':
+ break;
+ case 0xe2:
+ /* '\' followed by either U+2028 or U+2029 is ignored as
+ ** whitespace. Not that in UTF8, U+2028 is 0xe2 0x80 0x29.
+ ** U+2029 is the same except for the last byte */
+ if( sz2<4
+ || 0x80!=(u8)zIn[2]
+ || (0xa8!=(u8)zIn[3] && 0xa9!=(u8)zIn[3])
+ ){
+ pOut->eErr |= JSTRING_MALFORMED;
+ sz2 = 2;
+ break;
+ }
+ zIn += 2;
+ sz2 -= 2;
+ break;
+ default:
+ jsonAppendRawNZ(pOut, zIn, 2);
+ break;
+ }
+ assert( sz2>=2 );
+ zIn += 2;
+ sz2 -= 2;
}
+ jsonAppendChar(pOut, '"');
+ break;
+ }
+ case JSONB_TEXTRAW: {
+ jsonAppendString(pOut, (const char*)&pParse->aBlob[i+n], sz);
+ break;
+ }
+ case JSONB_ARRAY: {
+ jsonAppendChar(pOut, '[');
+ j = i+n;
+ iEnd = j+sz;
+ while( j<iEnd && pOut->eErr==0 ){
+ j = jsonTranslateBlobToText(pParse, j, pOut);
+ jsonAppendChar(pOut, ',');
+ }
+ if( j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+ if( sz>0 ) jsonStringTrimOneChar(pOut);
+ jsonAppendChar(pOut, ']');
+ break;
+ }
+ case JSONB_OBJECT: {
+ int x = 0;
+ jsonAppendChar(pOut, '{');
+ j = i+n;
+ iEnd = j+sz;
+ while( j<iEnd && pOut->eErr==0 ){
+ j = jsonTranslateBlobToText(pParse, j, pOut);
+ jsonAppendChar(pOut, (x++ & 1) ? ',' : ':');
+ }
+ if( (x & 1)!=0 || j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+ if( sz>0 ) jsonStringTrimOneChar(pOut);
+ jsonAppendChar(pOut, '}');
break;
}
+
default: {
+ malformed_jsonb:
+ pOut->eErr |= JSTRING_MALFORMED;
break;
}
}
+ return i+n+sz;
+}
+
+/* Return true if the input pJson
+**
+** For performance reasons, this routine does not do a detailed check of the
+** input BLOB to ensure that it is well-formed. Hence, false positives are
+** possible. False negatives should never occur, however.
+*/
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){
+ u32 sz, n;
+ const u8 *aBlob;
+ int nBlob;
+ JsonParse s;
+ if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0;
+ aBlob = sqlite3_value_blob(pJson);
+ nBlob = sqlite3_value_bytes(pJson);
+ if( nBlob<1 ) return 0;
+ if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0;
+ memset(&s, 0, sizeof(s));
+ s.aBlob = (u8*)aBlob;
+ s.nBlob = nBlob;
+ n = jsonbPayloadSize(&s, 0, &sz);
+ if( n==0 ) return 0;
+ if( sz+n!=(u32)nBlob ) return 0;
+ if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0;
+ return sz+n==(u32)nBlob;
}
/*
-** Compute the parentage of all nodes in a completed parse.
+** Given that a JSONB_ARRAY object starts at offset i, return
+** the number of entries in that array.
*/
-static int jsonParseFindParents(JsonParse *pParse){
- u32 *aUp;
- assert( pParse->aUp==0 );
- aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode );
- if( aUp==0 ){
- pParse->oom = 1;
- return SQLITE_NOMEM;
+static u32 jsonbArrayCount(JsonParse *pParse, u32 iRoot){
+ u32 n, sz, i, iEnd;
+ u32 k = 0;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ iEnd = iRoot+n+sz;
+ for(i=iRoot+n; n>0 && i<iEnd; i+=sz+n, k++){
+ n = jsonbPayloadSize(pParse, i, &sz);
}
- jsonParseFillInParentage(pParse, 0, 0);
- return SQLITE_OK;
+ return k;
}
/*
-** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+** Edit the payload size of the element at iRoot by the amount in
+** pParse->delta.
*/
-#define JSON_CACHE_ID (-429938) /* First cache entry */
-#define JSON_CACHE_SZ 4 /* Max number of cache entries */
+static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){
+ u32 sz = 0;
+ u32 nBlob;
+ assert( pParse->delta!=0 );
+ assert( pParse->nBlobAlloc >= pParse->nBlob );
+ nBlob = pParse->nBlob;
+ pParse->nBlob = pParse->nBlobAlloc;
+ (void)jsonbPayloadSize(pParse, iRoot, &sz);
+ pParse->nBlob = nBlob;
+ sz += pParse->delta;
+ pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz);
+}
/*
-** Obtain a complete parse of the JSON found in the first argument
-** of the argv array. Use the sqlite3_get_auxdata() cache for this
-** parse if it is available. If the cache is not available or if it
-** is no longer valid, parse the JSON again and return the new parse,
-** and also register the new parse so that it will be available for
-** future sqlite3_get_auxdata() calls.
+** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of
+** content beginning at iDel, and replacing them with nIns bytes of
+** content given by aIns.
**
-** If an error occurs and pErrCtx!=0 then report the error on pErrCtx
-** and return NULL.
+** nDel may be zero, in which case no bytes are removed. But iDel is
+** still important as new bytes will be insert beginning at iDel.
**
-** If an error occurs and pErrCtx==0 then return the Parse object with
-** JsonParse.nErr non-zero. If the caller invokes this routine with
-** pErrCtx==0 and it gets back a JsonParse with nErr!=0, then the caller
-** is responsible for invoking jsonParseFree() on the returned value.
-** But the caller may invoke jsonParseFree() *only* if pParse->nErr!=0.
+** aIns may be zero, in which case space is created to hold nIns bytes
+** beginning at iDel, but that space is uninitialized.
+**
+** Set pParse->oom if an OOM occurs.
*/
-static JsonParse *jsonParseCached(
- sqlite3_context *pCtx,
- sqlite3_value **argv,
- sqlite3_context *pErrCtx
+static void jsonBlobEdit(
+ JsonParse *pParse, /* The JSONB to be modified is in pParse->aBlob */
+ u32 iDel, /* First byte to be removed */
+ u32 nDel, /* Number of bytes to remove */
+ const u8 *aIns, /* Content to insert */
+ u32 nIns /* Bytes of content to insert */
){
- const char *zJson = (const char*)sqlite3_value_text(argv[0]);
- int nJson = sqlite3_value_bytes(argv[0]);
- JsonParse *p;
- JsonParse *pMatch = 0;
- int iKey;
- int iMinKey = 0;
- u32 iMinHold = 0xffffffff;
- u32 iMaxHold = 0;
- if( zJson==0 ) return 0;
- for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){
- p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);
- if( p==0 ){
- iMinKey = iKey;
- break;
+ i64 d = (i64)nIns - (i64)nDel;
+ if( d!=0 ){
+ if( pParse->nBlob + d > pParse->nBlobAlloc ){
+ jsonBlobExpand(pParse, pParse->nBlob+d);
+ if( pParse->oom ) return;
}
- if( pMatch==0
- && p->nJson==nJson
- && memcmp(p->zJson,zJson,nJson)==0
- ){
- p->nErr = 0;
- pMatch = p;
- }else if( p->iHold<iMinHold ){
- iMinHold = p->iHold;
- iMinKey = iKey;
+ memmove(&pParse->aBlob[iDel+nIns],
+ &pParse->aBlob[iDel+nDel],
+ pParse->nBlob - (iDel+nDel));
+ pParse->nBlob += d;
+ pParse->delta += d;
+ }
+ if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns);
+}
+
+/*
+** Return the number of escaped newlines to be ignored.
+** An escaped newline is a one of the following byte sequences:
+**
+** 0x5c 0x0a
+** 0x5c 0x0d
+** 0x5c 0x0d 0x0a
+** 0x5c 0xe2 0x80 0xa8
+** 0x5c 0xe2 0x80 0xa9
+*/
+static u32 jsonBytesToBypass(const char *z, u32 n){
+ u32 i = 0;
+ while( i+1<n ){
+ if( z[i]!='\\' ) return i;
+ if( z[i+1]=='\n' ){
+ i += 2;
+ continue;
}
- if( p->iHold>iMaxHold ){
- iMaxHold = p->iHold;
+ if( z[i+1]=='\r' ){
+ if( i+2<n && z[i+2]=='\n' ){
+ i += 3;
+ }else{
+ i += 2;
+ }
+ continue;
+ }
+ if( 0xe2==(u8)z[i+1]
+ && i+3<n
+ && 0x80==(u8)z[i+2]
+ && (0xa8==(u8)z[i+3] || 0xa9==(u8)z[i+3])
+ ){
+ i += 4;
+ continue;
}
+ break;
}
- if( pMatch ){
- pMatch->nErr = 0;
- pMatch->iHold = iMaxHold+1;
- return pMatch;
+ return i;
+}
+
+/*
+** Input z[0..n] defines JSON escape sequence including the leading '\\'.
+** Decode that escape sequence into a single character. Write that
+** character into *piOut. Return the number of bytes in the escape sequence.
+**
+** If there is a syntax error of some kind (for example too few characters
+** after the '\\' to complete the encoding) then *piOut is set to
+** JSON_INVALID_CHAR.
+*/
+static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){
+ assert( n>0 );
+ assert( z[0]=='\\' );
+ if( n<2 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
}
- p = sqlite3_malloc64( sizeof(*p) + nJson + 1 );
- if( p==0 ){
- sqlite3_result_error_nomem(pCtx);
- return 0;
+ switch( (u8)z[1] ){
+ case 'u': {
+ u32 v, vlo;
+ if( n<6 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ v = jsonHexToInt4(&z[2]);
+ if( (v & 0xfc00)==0xd800
+ && n>=12
+ && z[6]=='\\'
+ && z[7]=='u'
+ && ((vlo = jsonHexToInt4(&z[8]))&0xfc00)==0xdc00
+ ){
+ *piOut = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
+ return 12;
+ }else{
+ *piOut = v;
+ return 6;
+ }
+ }
+ case 'b': { *piOut = '\b'; return 2; }
+ case 'f': { *piOut = '\f'; return 2; }
+ case 'n': { *piOut = '\n'; return 2; }
+ case 'r': { *piOut = '\r'; return 2; }
+ case 't': { *piOut = '\t'; return 2; }
+ case 'v': { *piOut = '\v'; return 2; }
+ case '0': { *piOut = 0; return 2; }
+ case '\'':
+ case '"':
+ case '/':
+ case '\\':{ *piOut = z[1]; return 2; }
+ case 'x': {
+ if( n<4 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }
+ *piOut = (jsonHexToInt(z[2])<<4) | jsonHexToInt(z[3]);
+ return 4;
+ }
+ case 0xe2:
+ case '\r':
+ case '\n': {
+ u32 nSkip = jsonBytesToBypass(z, n);
+ if( nSkip==0 ){
+ *piOut = JSON_INVALID_CHAR;
+ return n;
+ }else if( nSkip==n ){
+ *piOut = 0;
+ return n;
+ }else if( z[nSkip]=='\\' ){
+ return nSkip + jsonUnescapeOneChar(&z[nSkip], n-nSkip, piOut);
+ }else{
+ int sz = sqlite3Utf8ReadLimited((u8*)&z[nSkip], n-nSkip, piOut);
+ return nSkip + sz;
+ }
+ }
+ default: {
+ *piOut = JSON_INVALID_CHAR;
+ return 2;
+ }
}
- memset(p, 0, sizeof(*p));
- p->zJson = (char*)&p[1];
- memcpy((char*)p->zJson, zJson, nJson+1);
- if( jsonParse(p, pErrCtx, p->zJson) ){
- if( pErrCtx==0 ){
- p->nErr = 1;
- return p;
+}
+
+
+/*
+** Compare two object labels. Return 1 if they are equal and
+** 0 if they differ.
+**
+** In this version, we know that one or the other or both of the
+** two comparands contains an escape sequence.
+*/
+static SQLITE_NOINLINE int jsonLabelCompareEscaped(
+ const char *zLeft, /* The left label */
+ u32 nLeft, /* Size of the left label in bytes */
+ int rawLeft, /* True if zLeft contains no escapes */
+ const char *zRight, /* The right label */
+ u32 nRight, /* Size of the right label in bytes */
+ int rawRight /* True if zRight is escape-free */
+){
+ u32 cLeft, cRight;
+ assert( rawLeft==0 || rawRight==0 );
+ while( 1 /*exit-by-return*/ ){
+ if( nLeft==0 ){
+ cLeft = 0;
+ }else if( rawLeft || zLeft[0]!='\\' ){
+ cLeft = ((u8*)zLeft)[0];
+ if( cLeft>=0xc0 ){
+ int sz = sqlite3Utf8ReadLimited((u8*)zLeft, nLeft, &cLeft);
+ zLeft += sz;
+ nLeft -= sz;
+ }else{
+ zLeft++;
+ nLeft--;
+ }
+ }else{
+ u32 n = jsonUnescapeOneChar(zLeft, nLeft, &cLeft);
+ zLeft += n;
+ assert( n<=nLeft );
+ nLeft -= n;
+ }
+ if( nRight==0 ){
+ cRight = 0;
+ }else if( rawRight || zRight[0]!='\\' ){
+ cRight = ((u8*)zRight)[0];
+ if( cRight>=0xc0 ){
+ int sz = sqlite3Utf8ReadLimited((u8*)zRight, nRight, &cRight);
+ zRight += sz;
+ nRight -= sz;
+ }else{
+ zRight++;
+ nRight--;
+ }
+ }else{
+ u32 n = jsonUnescapeOneChar(zRight, nRight, &cRight);
+ zRight += n;
+ assert( n<=nRight );
+ nRight -= n;
}
- sqlite3_free(p);
- return 0;
+ if( cLeft!=cRight ) return 0;
+ if( cLeft==0 ) return 1;
}
- p->nJson = nJson;
- p->iHold = iMaxHold+1;
- sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p,
- (void(*)(void*))jsonParseFree);
- return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey);
}
/*
-** Compare the OBJECT label at pNode against zKey,nKey. Return true on
-** a match.
+** Compare two object labels. Return 1 if they are equal and
+** 0 if they differ. Return -1 if an OOM occurs.
*/
-static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){
- assert( pNode->eU==1 );
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->n!=nKey ) return 0;
- return strncmp(pNode->u.zJContent, zKey, nKey)==0;
+static int jsonLabelCompare(
+ const char *zLeft, /* The left label */
+ u32 nLeft, /* Size of the left label in bytes */
+ int rawLeft, /* True if zLeft contains no escapes */
+ const char *zRight, /* The right label */
+ u32 nRight, /* Size of the right label in bytes */
+ int rawRight /* True if zRight is escape-free */
+){
+ if( rawLeft && rawRight ){
+ /* Simpliest case: Neither label contains escapes. A simple
+ ** memcmp() is sufficient. */
+ if( nLeft!=nRight ) return 0;
+ return memcmp(zLeft, zRight, nLeft)==0;
}else{
- if( pNode->n!=nKey+2 ) return 0;
- return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
+ return jsonLabelCompareEscaped(zLeft, nLeft, rawLeft,
+ zRight, nRight, rawRight);
}
}
-static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){
- if( p1->jnFlags & JNODE_RAW ){
- return jsonLabelCompare(p2, p1->u.zJContent, p1->n);
- }else if( p2->jnFlags & JNODE_RAW ){
- return jsonLabelCompare(p1, p2->u.zJContent, p2->n);
+
+/*
+** Error returns from jsonLookupStep()
+*/
+#define JSON_LOOKUP_ERROR 0xffffffff
+#define JSON_LOOKUP_NOTFOUND 0xfffffffe
+#define JSON_LOOKUP_PATHERROR 0xfffffffd
+#define JSON_LOOKUP_ISERROR(x) ((x)>=JSON_LOOKUP_PATHERROR)
+
+/* Forward declaration */
+static u32 jsonLookupStep(JsonParse*,u32,const char*,u32);
+
+
+/* This helper routine for jsonLookupStep() populates pIns with
+** binary data that is to be inserted into pParse.
+**
+** In the common case, pIns just points to pParse->aIns and pParse->nIns.
+** But if the zPath of the original edit operation includes path elements
+** that go deeper, additional substructure must be created.
+**
+** For example:
+**
+** json_insert('{}', '$.a.b.c', 123);
+**
+** The search stops at '$.a' But additional substructure must be
+** created for the ".b.c" part of the patch so that the final result
+** is: {"a":{"b":{"c"::123}}}. This routine populates pIns with
+** the binary equivalent of {"b":{"c":123}} so that it can be inserted.
+**
+** The caller is responsible for resetting pIns when it has finished
+** using the substructure.
+*/
+static u32 jsonCreateEditSubstructure(
+ JsonParse *pParse, /* The original JSONB that is being edited */
+ JsonParse *pIns, /* Populate this with the blob data to insert */
+ const char *zTail /* Tail of the path that determins substructure */
+){
+ static const u8 emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT };
+ int rc;
+ memset(pIns, 0, sizeof(*pIns));
+ pIns->db = pParse->db;
+ if( zTail[0]==0 ){
+ /* No substructure. Just insert what is given in pParse. */
+ pIns->aBlob = pParse->aIns;
+ pIns->nBlob = pParse->nIns;
+ rc = 0;
}else{
- return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0;
+ /* Construct the binary substructure */
+ pIns->nBlob = 1;
+ pIns->aBlob = (u8*)&emptyObject[zTail[0]=='.'];
+ pIns->eEdit = pParse->eEdit;
+ pIns->nIns = pParse->nIns;
+ pIns->aIns = pParse->aIns;
+ rc = jsonLookupStep(pIns, 0, zTail, 0);
+ pParse->oom |= pIns->oom;
}
+ return rc; /* Error code only */
}
-/* forward declaration */
-static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
-
/*
-** Search along zPath to find the node specified. Return a pointer
-** to that node, or NULL if zPath is malformed or if there is no such
-** node.
+** Search along zPath to find the Json element specified. Return an
+** index into pParse->aBlob[] for the start of that element's value.
+**
+** If the value found by this routine is the value half of label/value pair
+** within an object, then set pPath->iLabel to the start of the corresponding
+** label, before returning.
+**
+** Return one of the JSON_LOOKUP error codes if problems are seen.
**
-** If pApnd!=0, then try to append new nodes to complete zPath if it is
-** possible to do so and if no existing node corresponds to zPath. If
-** new nodes are appended *pApnd is set to 1.
+** This routine will also modify the blob. If pParse->eEdit is one of
+** JEDIT_DEL, JEDIT_REPL, JEDIT_INS, or JEDIT_SET, then changes might be
+** made to the selected value. If an edit is performed, then the return
+** value does not necessarily point to the select element. If an edit
+** is performed, the return value is only useful for detecting error
+** conditions.
*/
-static JsonNode *jsonLookupStep(
+static u32 jsonLookupStep(
JsonParse *pParse, /* The JSON to search */
- u32 iRoot, /* Begin the search at this node */
+ u32 iRoot, /* Begin the search at this element of aBlob[] */
const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- const char **pzErr /* Make *pzErr point to any syntax error in zPath */
+ u32 iLabel /* Label if iRoot is a value of in an object */
){
- u32 i, j, nKey;
+ u32 i, j, k, nKey, sz, n, iEnd, rc;
const char *zKey;
- JsonNode *pRoot = &pParse->aNode[iRoot];
- if( zPath[0]==0 ) return pRoot;
- if( pRoot->jnFlags & JNODE_REPLACE ) return 0;
+ u8 x;
+
+ if( zPath[0]==0 ){
+ if( pParse->eEdit && jsonBlobMakeEditable(pParse, pParse->nIns) ){
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ sz += n;
+ if( pParse->eEdit==JEDIT_DEL ){
+ if( iLabel>0 ){
+ sz += iRoot - iLabel;
+ iRoot = iLabel;
+ }
+ jsonBlobEdit(pParse, iRoot, sz, 0, 0);
+ }else if( pParse->eEdit==JEDIT_INS ){
+ /* Already exists, so json_insert() is a no-op */
+ }else{
+ /* json_set() or json_replace() */
+ jsonBlobEdit(pParse, iRoot, sz, pParse->aIns, pParse->nIns);
+ }
+ }
+ pParse->iLabel = iLabel;
+ return iRoot;
+ }
if( zPath[0]=='.' ){
- if( pRoot->eType!=JSON_OBJECT ) return 0;
+ int rawKey = 1;
+ x = pParse->aBlob[iRoot];
zPath++;
if( zPath[0]=='"' ){
zKey = zPath + 1;
@@ -201452,303 +205892,849 @@ static JsonNode *jsonLookupStep(
if( zPath[i] ){
i++;
}else{
- *pzErr = zPath;
- return 0;
+ return JSON_LOOKUP_PATHERROR;
}
testcase( nKey==0 );
+ rawKey = memchr(zKey, '\\', nKey)==0;
}else{
zKey = zPath;
for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
nKey = i;
if( nKey==0 ){
- *pzErr = zPath;
- return 0;
- }
- }
- j = 1;
- for(;;){
- while( j<=pRoot->n ){
- if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
- return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
- }
- j++;
- j += jsonNodeSize(&pRoot[j]);
+ return JSON_LOOKUP_PATHERROR;
+ }
+ }
+ if( (x & 0x0f)!=JSONB_OBJECT ) return JSON_LOOKUP_NOTFOUND;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ j = iRoot + n; /* j is the index of a label */
+ iEnd = j+sz;
+ while( j<iEnd ){
+ int rawLabel;
+ const char *zLabel;
+ x = pParse->aBlob[j] & 0x0f;
+ if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ k = j+n; /* k is the index of the label text */
+ if( k+sz>=iEnd ) return JSON_LOOKUP_ERROR;
+ zLabel = (const char*)&pParse->aBlob[k];
+ rawLabel = x==JSONB_TEXT || x==JSONB_TEXTRAW;
+ if( jsonLabelCompare(zKey, nKey, rawKey, zLabel, sz, rawLabel) ){
+ u32 v = k+sz; /* v is the index of the value */
+ if( ((pParse->aBlob[v])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, v, &sz);
+ if( n==0 || v+n+sz>iEnd ) return JSON_LOOKUP_ERROR;
+ assert( j>0 );
+ rc = jsonLookupStep(pParse, v, &zPath[i], j);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
}
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pRoot->eU==2 );
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( pApnd ){
- u32 iStart, iLabel;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- zPath += i;
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- assert( pRoot->eU==0 );
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- VVA( pRoot->eU = 2 );
- pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
- }
- return pNode;
+ j = k+sz;
+ if( ((pParse->aBlob[j])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ j += n+sz;
+ }
+ if( j>iEnd ) return JSON_LOOKUP_ERROR;
+ if( pParse->eEdit>=JEDIT_INS ){
+ u32 nIns; /* Total bytes to insert (label+value) */
+ JsonParse v; /* BLOB encoding of the value to be inserted */
+ JsonParse ix; /* Header of the label to be inserted */
+ testcase( pParse->eEdit==JEDIT_INS );
+ testcase( pParse->eEdit==JEDIT_SET );
+ memset(&ix, 0, sizeof(ix));
+ ix.db = pParse->db;
+ jsonBlobAppendNode(&ix, rawKey?JSONB_TEXTRAW:JSONB_TEXT5, nKey, 0);
+ pParse->oom |= ix.oom;
+ rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i]);
+ if( !JSON_LOOKUP_ISERROR(rc)
+ && jsonBlobMakeEditable(pParse, ix.nBlob+nKey+v.nBlob)
+ ){
+ assert( !pParse->oom );
+ nIns = ix.nBlob + nKey + v.nBlob;
+ jsonBlobEdit(pParse, j, 0, 0, nIns);
+ if( !pParse->oom ){
+ assert( pParse->aBlob!=0 ); /* Because pParse->oom!=0 */
+ assert( ix.aBlob!=0 ); /* Because pPasre->oom!=0 */
+ memcpy(&pParse->aBlob[j], ix.aBlob, ix.nBlob);
+ k = j + ix.nBlob;
+ memcpy(&pParse->aBlob[k], zKey, nKey);
+ k += nKey;
+ memcpy(&pParse->aBlob[k], v.aBlob, v.nBlob);
+ if( ALWAYS(pParse->delta) ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ }
+ }
+ jsonParseReset(&v);
+ jsonParseReset(&ix);
+ return rc;
}
}else if( zPath[0]=='[' ){
- i = 0;
- j = 1;
- while( sqlite3Isdigit(zPath[j]) ){
- i = i*10 + zPath[j] - '0';
- j++;
+ x = pParse->aBlob[iRoot] & 0x0f;
+ if( x!=JSONB_ARRAY ) return JSON_LOOKUP_NOTFOUND;
+ n = jsonbPayloadSize(pParse, iRoot, &sz);
+ k = 0;
+ i = 1;
+ while( sqlite3Isdigit(zPath[i]) ){
+ k = k*10 + zPath[i] - '0';
+ i++;
}
- if( j<2 || zPath[j]!=']' ){
+ if( i<2 || zPath[i]!=']' ){
if( zPath[1]=='#' ){
- JsonNode *pBase = pRoot;
- int iBase = iRoot;
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- for(;;){
- while( j<=pBase->n ){
- if( (pBase[j].jnFlags & JNODE_REMOVE)==0 ) i++;
- j += jsonNodeSize(&pBase[j]);
- }
- if( (pBase->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pBase->eU==2 );
- iBase += pBase->u.iAppend;
- pBase = &pParse->aNode[iBase];
- j = 1;
- }
- j = 2;
+ k = jsonbArrayCount(pParse, iRoot);
+ i = 2;
if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){
- unsigned int x = 0;
- j = 3;
+ unsigned int nn = 0;
+ i = 3;
do{
- x = x*10 + zPath[j] - '0';
- j++;
- }while( sqlite3Isdigit(zPath[j]) );
- if( x>i ) return 0;
- i -= x;
+ nn = nn*10 + zPath[i] - '0';
+ i++;
+ }while( sqlite3Isdigit(zPath[i]) );
+ if( nn>k ) return JSON_LOOKUP_NOTFOUND;
+ k -= nn;
}
- if( zPath[j]!=']' ){
- *pzErr = zPath;
- return 0;
+ if( zPath[i]!=']' ){
+ return JSON_LOOKUP_PATHERROR;
}
}else{
- *pzErr = zPath;
- return 0;
+ return JSON_LOOKUP_PATHERROR;
}
}
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- zPath += j + 1;
- j = 1;
- for(;;){
- while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){
- if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- assert( pRoot->eU==2 );
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( j<=pRoot->n ){
- return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
- }
- if( i==0 && pApnd ){
- u32 iStart;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- assert( pRoot->eU==0 );
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- VVA( pRoot->eU = 2 );
+ j = iRoot+n;
+ iEnd = j+sz;
+ while( j<iEnd ){
+ if( k==0 ){
+ rc = jsonLookupStep(pParse, j, &zPath[i+1], 0);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
}
- return pNode;
+ k--;
+ n = jsonbPayloadSize(pParse, j, &sz);
+ if( n==0 ) return JSON_LOOKUP_ERROR;
+ j += n+sz;
+ }
+ if( j>iEnd ) return JSON_LOOKUP_ERROR;
+ if( k>0 ) return JSON_LOOKUP_NOTFOUND;
+ if( pParse->eEdit>=JEDIT_INS ){
+ JsonParse v;
+ testcase( pParse->eEdit==JEDIT_INS );
+ testcase( pParse->eEdit==JEDIT_SET );
+ rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i+1]);
+ if( !JSON_LOOKUP_ISERROR(rc)
+ && jsonBlobMakeEditable(pParse, v.nBlob)
+ ){
+ assert( !pParse->oom );
+ jsonBlobEdit(pParse, j, 0, v.aBlob, v.nBlob);
+ }
+ jsonParseReset(&v);
+ if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot);
+ return rc;
}
}else{
- *pzErr = zPath;
+ return JSON_LOOKUP_PATHERROR;
}
- return 0;
+ return JSON_LOOKUP_NOTFOUND;
}
/*
-** Append content to pParse that will complete zPath. Return a pointer
-** to the inserted node, or return NULL if the append fails.
+** Convert a JSON BLOB into text and make that text the return value
+** of an SQL function.
*/
-static JsonNode *jsonLookupAppend(
- JsonParse *pParse, /* Append content to the JSON parse */
- const char *zPath, /* Description of content to append */
- int *pApnd, /* Set this flag to 1 */
- const char **pzErr /* Make this point to any syntax error */
+static void jsonReturnTextJsonFromBlob(
+ sqlite3_context *ctx,
+ const u8 *aBlob,
+ u32 nBlob
){
- *pApnd = 1;
- if( zPath[0]==0 ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
- }
- if( zPath[0]=='.' ){
- jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- }else if( strncmp(zPath,"[0]",3)==0 ){
- jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- }else{
- return 0;
- }
- if( pParse->oom ) return 0;
- return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
+ JsonParse x;
+ JsonString s;
+
+ if( NEVER(aBlob==0) ) return;
+ memset(&x, 0, sizeof(x));
+ x.aBlob = (u8*)aBlob;
+ x.nBlob = nBlob;
+ jsonStringInit(&s, ctx);
+ jsonTranslateBlobToText(&x, 0, &s);
+ jsonReturnString(&s, 0, 0);
}
+
/*
-** Return the text of a syntax error message on a JSON path. Space is
-** obtained from sqlite3_malloc().
+** Return the value of the BLOB node at index i.
+**
+** If the value is a primitive, return it as an SQL value.
+** If the value is an array or object, return it as either
+** JSON text or the BLOB encoding, depending on the JSON_B flag
+** on the userdata.
*/
-static char *jsonPathSyntaxError(const char *zErr){
- return sqlite3_mprintf("JSON path error near '%q'", zErr);
+static void jsonReturnFromBlob(
+ JsonParse *pParse, /* Complete JSON parse tree */
+ u32 i, /* Index of the node */
+ sqlite3_context *pCtx, /* Return value for this function */
+ int textOnly /* return text JSON. Disregard user-data */
+){
+ u32 n, sz;
+ int rc;
+ sqlite3 *db = sqlite3_context_db_handle(pCtx);
+
+ n = jsonbPayloadSize(pParse, i, &sz);
+ if( n==0 ){
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ return;
+ }
+ switch( pParse->aBlob[i] & 0x0f ){
+ case JSONB_NULL: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_null(pCtx);
+ break;
+ }
+ case JSONB_TRUE: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_int(pCtx, 1);
+ break;
+ }
+ case JSONB_FALSE: {
+ if( sz ) goto returnfromblob_malformed;
+ sqlite3_result_int(pCtx, 0);
+ break;
+ }
+ case JSONB_INT5:
+ case JSONB_INT: {
+ sqlite3_int64 iRes = 0;
+ char *z;
+ int bNeg = 0;
+ char x;
+ if( sz==0 ) goto returnfromblob_malformed;
+ x = (char)pParse->aBlob[i+n];
+ if( x=='-' ){
+ if( sz<2 ) goto returnfromblob_malformed;
+ n++;
+ sz--;
+ bNeg = 1;
+ }
+ z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz);
+ if( z==0 ) goto returnfromblob_oom;
+ rc = sqlite3DecOrHexToI64(z, &iRes);
+ sqlite3DbFree(db, z);
+ if( rc==0 ){
+ sqlite3_result_int64(pCtx, bNeg ? -iRes : iRes);
+ }else if( rc==3 && bNeg ){
+ sqlite3_result_int64(pCtx, SMALLEST_INT64);
+ }else if( rc==1 ){
+ goto returnfromblob_malformed;
+ }else{
+ if( bNeg ){ n--; sz++; }
+ goto to_double;
+ }
+ break;
+ }
+ case JSONB_FLOAT5:
+ case JSONB_FLOAT: {
+ double r;
+ char *z;
+ if( sz==0 ) goto returnfromblob_malformed;
+ to_double:
+ z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz);
+ if( z==0 ) goto returnfromblob_oom;
+ rc = sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
+ sqlite3DbFree(db, z);
+ if( rc<=0 ) goto returnfromblob_malformed;
+ sqlite3_result_double(pCtx, r);
+ break;
+ }
+ case JSONB_TEXTRAW:
+ case JSONB_TEXT: {
+ sqlite3_result_text(pCtx, (char*)&pParse->aBlob[i+n], sz,
+ SQLITE_TRANSIENT);
+ break;
+ }
+ case JSONB_TEXT5:
+ case JSONB_TEXTJ: {
+ /* Translate JSON formatted string into raw text */
+ u32 iIn, iOut;
+ const char *z;
+ char *zOut;
+ u32 nOut = sz;
+ z = (const char*)&pParse->aBlob[i+n];
+ zOut = sqlite3DbMallocRaw(db, nOut+1);
+ if( zOut==0 ) goto returnfromblob_oom;
+ for(iIn=iOut=0; iIn<sz; iIn++){
+ char c = z[iIn];
+ if( c=='\\' ){
+ u32 v;
+ u32 szEscape = jsonUnescapeOneChar(&z[iIn], sz-iIn, &v);
+ if( v<=0x7f ){
+ zOut[iOut++] = (char)v;
+ }else if( v<=0x7ff ){
+ assert( szEscape>=2 );
+ zOut[iOut++] = (char)(0xc0 | (v>>6));
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }else if( v<0x10000 ){
+ assert( szEscape>=3 );
+ zOut[iOut++] = 0xe0 | (v>>12);
+ zOut[iOut++] = 0x80 | ((v>>6)&0x3f);
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }else if( v==JSON_INVALID_CHAR ){
+ /* Silently ignore illegal unicode */
+ }else{
+ assert( szEscape>=4 );
+ zOut[iOut++] = 0xf0 | (v>>18);
+ zOut[iOut++] = 0x80 | ((v>>12)&0x3f);
+ zOut[iOut++] = 0x80 | ((v>>6)&0x3f);
+ zOut[iOut++] = 0x80 | (v&0x3f);
+ }
+ iIn += szEscape - 1;
+ }else{
+ zOut[iOut++] = c;
+ }
+ } /* end for() */
+ assert( iOut<=nOut );
+ zOut[iOut] = 0;
+ sqlite3_result_text(pCtx, zOut, iOut, SQLITE_DYNAMIC);
+ break;
+ }
+ case JSONB_ARRAY:
+ case JSONB_OBJECT: {
+ int flags = textOnly ? 0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx));
+ if( flags & JSON_BLOB ){
+ sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT);
+ }else{
+ jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n);
+ }
+ break;
+ }
+ default: {
+ goto returnfromblob_malformed;
+ }
+ }
+ return;
+
+returnfromblob_oom:
+ sqlite3_result_error_nomem(pCtx);
+ return;
+
+returnfromblob_malformed:
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ return;
}
/*
-** Do a node lookup using zPath. Return a pointer to the node on success.
-** Return NULL if not found or if there is an error.
+** pArg is a function argument that might be an SQL value or a JSON
+** value. Figure out what it is and encode it as a JSONB blob.
+** Return the results in pParse.
**
-** On an error, write an error message into pCtx and increment the
-** pParse->nErr counter.
+** pParse is uninitialized upon entry. This routine will handle the
+** initialization of pParse. The result will be contained in
+** pParse->aBlob and pParse->nBlob. pParse->aBlob might be dynamically
+** allocated (if pParse->nBlobAlloc is greater than zero) in which case
+** the caller is responsible for freeing the space allocated to pParse->aBlob
+** when it has finished with it. Or pParse->aBlob might be a static string
+** or a value obtained from sqlite3_value_blob(pArg).
**
-** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
-** nodes are appended.
+** If the argument is a BLOB that is clearly not a JSONB, then this
+** function might set an error message in ctx and return non-zero.
+** It might also set an error message and return non-zero on an OOM error.
*/
-static JsonNode *jsonLookup(
- JsonParse *pParse, /* The JSON to search */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- sqlite3_context *pCtx /* Report errors here, if not NULL */
-){
- const char *zErr = 0;
- JsonNode *pNode = 0;
- char *zMsg;
-
- if( zPath==0 ) return 0;
- if( zPath[0]!='$' ){
- zErr = zPath;
- goto lookup_err;
+static int jsonFunctionArgToBlob(
+ sqlite3_context *ctx,
+ sqlite3_value *pArg,
+ JsonParse *pParse
+){
+ int eType = sqlite3_value_type(pArg);
+ static u8 aNull[] = { 0x00 };
+ memset(pParse, 0, sizeof(pParse[0]));
+ pParse->db = sqlite3_context_db_handle(ctx);
+ switch( eType ){
+ default: {
+ pParse->aBlob = aNull;
+ pParse->nBlob = 1;
+ return 0;
+ }
+ case SQLITE_BLOB: {
+ if( jsonFuncArgMightBeBinary(pArg) ){
+ pParse->aBlob = (u8*)sqlite3_value_blob(pArg);
+ pParse->nBlob = sqlite3_value_bytes(pArg);
+ }else{
+ sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1);
+ return 1;
+ }
+ break;
+ }
+ case SQLITE_TEXT: {
+ const char *zJson = (const char*)sqlite3_value_text(pArg);
+ int nJson = sqlite3_value_bytes(pArg);
+ if( zJson==0 ) return 1;
+ if( sqlite3_value_subtype(pArg)==JSON_SUBTYPE ){
+ pParse->zJson = (char*)zJson;
+ pParse->nJson = nJson;
+ if( jsonConvertTextToBlob(pParse, ctx) ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ sqlite3DbFree(pParse->db, pParse->aBlob);
+ memset(pParse, 0, sizeof(pParse[0]));
+ return 1;
+ }
+ }else{
+ jsonBlobAppendNode(pParse, JSONB_TEXTRAW, nJson, zJson);
+ }
+ break;
+ }
+ case SQLITE_FLOAT: {
+ double r = sqlite3_value_double(pArg);
+ if( NEVER(sqlite3IsNaN(r)) ){
+ jsonBlobAppendNode(pParse, JSONB_NULL, 0, 0);
+ }else{
+ int n = sqlite3_value_bytes(pArg);
+ const char *z = (const char*)sqlite3_value_text(pArg);
+ if( z==0 ) return 1;
+ if( z[0]=='I' ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999");
+ }else if( z[0]=='-' && z[1]=='I' ){
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999");
+ }else{
+ jsonBlobAppendNode(pParse, JSONB_FLOAT, n, z);
+ }
+ }
+ break;
+ }
+ case SQLITE_INTEGER: {
+ int n = sqlite3_value_bytes(pArg);
+ const char *z = (const char*)sqlite3_value_text(pArg);
+ if( z==0 ) return 1;
+ jsonBlobAppendNode(pParse, JSONB_INT, n, z);
+ break;
+ }
+ }
+ if( pParse->oom ){
+ sqlite3_result_error_nomem(ctx);
+ return 1;
+ }else{
+ return 0;
}
- zPath++;
- pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
- if( zErr==0 ) return pNode;
+}
-lookup_err:
- pParse->nErr++;
- assert( zErr!=0 && pCtx!=0 );
- zMsg = jsonPathSyntaxError(zErr);
+/*
+** Generate a bad path error.
+**
+** If ctx is not NULL then push the error message into ctx and return NULL.
+** If ctx is NULL, then return the text of the error message.
+*/
+static char *jsonBadPathError(
+ sqlite3_context *ctx, /* The function call containing the error */
+ const char *zPath /* The path with the problem */
+){
+ char *zMsg = sqlite3_mprintf("bad JSON path: %Q", zPath);
+ if( ctx==0 ) return zMsg;
if( zMsg ){
- sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_result_error(ctx, zMsg, -1);
sqlite3_free(zMsg);
}else{
- sqlite3_result_error_nomem(pCtx);
+ sqlite3_result_error_nomem(ctx);
}
return 0;
}
+/* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent
+** arguments come in parse where each pair contains a JSON path and
+** content to insert or set at that patch. Do the updates
+** and return the result.
+**
+** The specific operation is determined by eEdit, which can be one
+** of JEDIT_INS, JEDIT_REPL, or JEDIT_SET.
+*/
+static void jsonInsertIntoBlob(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv,
+ int eEdit /* JEDIT_INS, JEDIT_REPL, or JEDIT_SET */
+){
+ int i;
+ u32 rc = 0;
+ const char *zPath = 0;
+ int flgs;
+ JsonParse *p;
+ JsonParse ax;
+
+ assert( (argc&1)==1 );
+ flgs = argc==1 ? 0 : JSON_EDITABLE;
+ p = jsonParseFuncArg(ctx, argv[0], flgs);
+ if( p==0 ) return;
+ for(i=1; i<argc-1; i+=2){
+ if( sqlite3_value_type(argv[i])==SQLITE_NULL ) continue;
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ if( zPath==0 ){
+ sqlite3_result_error_nomem(ctx);
+ jsonParseFree(p);
+ return;
+ }
+ if( zPath[0]!='$' ) goto jsonInsertIntoBlob_patherror;
+ if( jsonFunctionArgToBlob(ctx, argv[i+1], &ax) ){
+ jsonParseReset(&ax);
+ jsonParseFree(p);
+ return;
+ }
+ if( zPath[1]==0 ){
+ if( eEdit==JEDIT_REPL || eEdit==JEDIT_SET ){
+ jsonBlobEdit(p, 0, p->nBlob, ax.aBlob, ax.nBlob);
+ }
+ rc = 0;
+ }else{
+ p->eEdit = eEdit;
+ p->nIns = ax.nBlob;
+ p->aIns = ax.aBlob;
+ p->delta = 0;
+ rc = jsonLookupStep(p, 0, zPath+1, 0);
+ }
+ jsonParseReset(&ax);
+ if( rc==JSON_LOOKUP_NOTFOUND ) continue;
+ if( JSON_LOOKUP_ISERROR(rc) ) goto jsonInsertIntoBlob_patherror;
+ }
+ jsonReturnParse(ctx, p);
+ jsonParseFree(p);
+ return;
+
+jsonInsertIntoBlob_patherror:
+ jsonParseFree(p);
+ if( rc==JSON_LOOKUP_ERROR ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }else{
+ jsonBadPathError(ctx, zPath);
+ }
+ return;
+}
/*
-** Report the wrong number of arguments for json_insert(), json_replace()
-** or json_set().
+** If pArg is a blob that seems like a JSONB blob, then initialize
+** p to point to that JSONB and return TRUE. If pArg does not seem like
+** a JSONB blob, then return FALSE;
+**
+** This routine is only called if it is already known that pArg is a
+** blob. The only open question is whether or not the blob appears
+** to be a JSONB blob.
*/
-static void jsonWrongNumArgs(
- sqlite3_context *pCtx,
- const char *zFuncName
-){
- char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
- zFuncName);
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
+static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){
+ u32 n, sz = 0;
+ p->aBlob = (u8*)sqlite3_value_blob(pArg);
+ p->nBlob = (u32)sqlite3_value_bytes(pArg);
+ if( p->nBlob==0 ){
+ p->aBlob = 0;
+ return 0;
+ }
+ if( NEVER(p->aBlob==0) ){
+ return 0;
+ }
+ if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT
+ && (n = jsonbPayloadSize(p, 0, &sz))>0
+ && sz+n==p->nBlob
+ && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0)
+ ){
+ return 1;
+ }
+ p->aBlob = 0;
+ p->nBlob = 0;
+ return 0;
}
/*
-** Mark all NULL entries in the Object passed in as JNODE_REMOVE.
+** Generate a JsonParse object, containing valid JSONB in aBlob and nBlob,
+** from the SQL function argument pArg. Return a pointer to the new
+** JsonParse object.
+**
+** Ownership of the new JsonParse object is passed to the caller. The
+** caller should invoke jsonParseFree() on the return value when it
+** has finished using it.
+**
+** If any errors are detected, an appropriate error messages is set
+** using sqlite3_result_error() or the equivalent and this routine
+** returns NULL. This routine also returns NULL if the pArg argument
+** is an SQL NULL value, but no error message is set in that case. This
+** is so that SQL functions that are given NULL arguments will return
+** a NULL value.
*/
-static void jsonRemoveAllNulls(JsonNode *pNode){
- int i, n;
- assert( pNode->eType==JSON_OBJECT );
- n = pNode->n;
- for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){
- switch( pNode[i].eType ){
- case JSON_NULL:
- pNode[i].jnFlags |= JNODE_REMOVE;
- break;
- case JSON_OBJECT:
- jsonRemoveAllNulls(&pNode[i]);
- break;
+static JsonParse *jsonParseFuncArg(
+ sqlite3_context *ctx,
+ sqlite3_value *pArg,
+ u32 flgs
+){
+ int eType; /* Datatype of pArg */
+ JsonParse *p = 0; /* Value to be returned */
+ JsonParse *pFromCache = 0; /* Value taken from cache */
+ sqlite3 *db; /* The database connection */
+
+ assert( ctx!=0 );
+ eType = sqlite3_value_type(pArg);
+ if( eType==SQLITE_NULL ){
+ return 0;
+ }
+ pFromCache = jsonCacheSearch(ctx, pArg);
+ if( pFromCache ){
+ pFromCache->nJPRef++;
+ if( (flgs & JSON_EDITABLE)==0 ){
+ return pFromCache;
}
}
-}
+ db = sqlite3_context_db_handle(ctx);
+rebuild_from_cache:
+ p = sqlite3DbMallocZero(db, sizeof(*p));
+ if( p==0 ) goto json_pfa_oom;
+ memset(p, 0, sizeof(*p));
+ p->db = db;
+ p->nJPRef = 1;
+ if( pFromCache!=0 ){
+ u32 nBlob = pFromCache->nBlob;
+ p->aBlob = sqlite3DbMallocRaw(db, nBlob);
+ if( p->aBlob==0 ) goto json_pfa_oom;
+ memcpy(p->aBlob, pFromCache->aBlob, nBlob);
+ p->nBlobAlloc = p->nBlob = nBlob;
+ p->hasNonstd = pFromCache->hasNonstd;
+ jsonParseFree(pFromCache);
+ return p;
+ }
+ if( eType==SQLITE_BLOB ){
+ if( jsonArgIsJsonb(pArg,p) ){
+ if( (flgs & JSON_EDITABLE)!=0 && jsonBlobMakeEditable(p, 0)==0 ){
+ goto json_pfa_oom;
+ }
+ return p;
+ }
+ /* If the blob is not valid JSONB, fall through into trying to cast
+ ** the blob into text which is then interpreted as JSON. (tag-20240123-a)
+ **
+ ** This goes against all historical documentation about how the SQLite
+ ** JSON functions were suppose to work. From the beginning, blob was
+ ** reserved for expansion and a blob value should have raised an error.
+ ** But it did not, due to a bug. And many applications came to depend
+ ** upon this buggy behavior, espeically when using the CLI and reading
+ ** JSON text using readfile(), which returns a blob. For this reason
+ ** we will continue to support the bug moving forward.
+ ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d
+ */
+ }
+ p->zJson = (char*)sqlite3_value_text(pArg);
+ p->nJson = sqlite3_value_bytes(pArg);
+ if( p->nJson==0 ) goto json_pfa_malformed;
+ if( NEVER(p->zJson==0) ) goto json_pfa_oom;
+ if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 0 : ctx) ){
+ if( flgs & JSON_KEEPERROR ){
+ p->nErr = 1;
+ return p;
+ }else{
+ jsonParseFree(p);
+ return 0;
+ }
+ }else{
+ int isRCStr = sqlite3ValueIsOfClass(pArg, sqlite3RCStrUnref);
+ int rc;
+ if( !isRCStr ){
+ char *zNew = sqlite3RCStrNew( p->nJson );
+ if( zNew==0 ) goto json_pfa_oom;
+ memcpy(zNew, p->zJson, p->nJson);
+ p->zJson = zNew;
+ p->zJson[p->nJson] = 0;
+ }else{
+ sqlite3RCStrRef(p->zJson);
+ }
+ p->bJsonIsRCStr = 1;
+ rc = jsonCacheInsert(ctx, p);
+ if( rc==SQLITE_NOMEM ) goto json_pfa_oom;
+ if( flgs & JSON_EDITABLE ){
+ pFromCache = p;
+ p = 0;
+ goto rebuild_from_cache;
+ }
+ }
+ return p;
+json_pfa_malformed:
+ if( flgs & JSON_KEEPERROR ){
+ p->nErr = 1;
+ return p;
+ }else{
+ jsonParseFree(p);
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ return 0;
+ }
-/****************************************************************************
-** SQL functions used for testing and debugging
-****************************************************************************/
+json_pfa_oom:
+ jsonParseFree(pFromCache);
+ jsonParseFree(p);
+ sqlite3_result_error_nomem(ctx);
+ return 0;
+}
-#ifdef SQLITE_DEBUG
/*
-** The json_parse(JSON) function returns a string which describes
-** a parse of the JSON provided. Or it returns NULL if JSON is not
-** well-formed.
+** Make the return value of a JSON function either the raw JSONB blob
+** or make it JSON text, depending on whether the JSON_BLOB flag is
+** set on the function.
*/
-static void jsonParseFunc(
+static void jsonReturnParse(
sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
+ JsonParse *p
){
- JsonString s; /* Output string - not real JSON */
- JsonParse x; /* The parse */
- u32 i;
-
- assert( argc==1 );
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- jsonParseFindParents(&x);
- jsonInit(&s, ctx);
- for(i=0; i<x.nNode; i++){
- const char *zType;
- if( x.aNode[i].jnFlags & JNODE_LABEL ){
- assert( x.aNode[i].eType==JSON_STRING );
- zType = "label";
+ int flgs;
+ if( p->oom ){
+ sqlite3_result_error_nomem(ctx);
+ return;
+ }
+ flgs = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( flgs & JSON_BLOB ){
+ if( p->nBlobAlloc>0 && !p->bReadOnly ){
+ sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_DYNAMIC);
+ p->nBlobAlloc = 0;
}else{
- zType = jsonType[x.aNode[i].eType];
+ sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_TRANSIENT);
}
- jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d",
- i, zType, x.aNode[i].n, x.aUp[i]);
- assert( x.aNode[i].eU==0 || x.aNode[i].eU==1 );
- if( x.aNode[i].u.zJContent!=0 ){
- assert( x.aNode[i].eU==1 );
- jsonAppendRaw(&s, " ", 1);
- jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n);
- }else{
- assert( x.aNode[i].eU==0 );
+ }else{
+ JsonString s;
+ jsonStringInit(&s, ctx);
+ p->delta = 0;
+ jsonTranslateBlobToText(p, 0, &s);
+ jsonReturnString(&s, p, ctx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
+}
+
+/****************************************************************************
+** SQL functions used for testing and debugging
+****************************************************************************/
+
+#if SQLITE_DEBUG
+/*
+** Decode JSONB bytes in aBlob[] starting at iStart through but not
+** including iEnd. Indent the
+** content by nIndent spaces.
+*/
+static void jsonDebugPrintBlob(
+ JsonParse *pParse, /* JSON content */
+ u32 iStart, /* Start rendering here */
+ u32 iEnd, /* Do not render this byte or any byte after this one */
+ int nIndent, /* Indent by this many spaces */
+ sqlite3_str *pOut /* Generate output into this sqlite3_str object */
+){
+ while( iStart<iEnd ){
+ u32 i, n, nn, sz = 0;
+ int showContent = 1;
+ u8 x = pParse->aBlob[iStart] & 0x0f;
+ u32 savedNBlob = pParse->nBlob;
+ sqlite3_str_appendf(pOut, "%5d:%*s", iStart, nIndent, "");
+ if( pParse->nBlobAlloc>pParse->nBlob ){
+ pParse->nBlob = pParse->nBlobAlloc;
+ }
+ nn = n = jsonbPayloadSize(pParse, iStart, &sz);
+ if( nn==0 ) nn = 1;
+ if( sz>0 && x<JSONB_ARRAY ){
+ nn += sz;
+ }
+ for(i=0; i<nn; i++){
+ sqlite3_str_appendf(pOut, " %02x", pParse->aBlob[iStart+i]);
+ }
+ if( n==0 ){
+ sqlite3_str_appendf(pOut, " ERROR invalid node size\n");
+ iStart = n==0 ? iStart+1 : iEnd;
+ continue;
}
- jsonAppendRaw(&s, "\n", 1);
+ pParse->nBlob = savedNBlob;
+ if( iStart+n+sz>iEnd ){
+ iEnd = iStart+n+sz;
+ if( iEnd>pParse->nBlob ){
+ if( pParse->nBlobAlloc>0 && iEnd>pParse->nBlobAlloc ){
+ iEnd = pParse->nBlobAlloc;
+ }else{
+ iEnd = pParse->nBlob;
+ }
+ }
+ }
+ sqlite3_str_appendall(pOut," <-- ");
+ switch( x ){
+ case JSONB_NULL: sqlite3_str_appendall(pOut,"null"); break;
+ case JSONB_TRUE: sqlite3_str_appendall(pOut,"true"); break;
+ case JSONB_FALSE: sqlite3_str_appendall(pOut,"false"); break;
+ case JSONB_INT: sqlite3_str_appendall(pOut,"int"); break;
+ case JSONB_INT5: sqlite3_str_appendall(pOut,"int5"); break;
+ case JSONB_FLOAT: sqlite3_str_appendall(pOut,"float"); break;
+ case JSONB_FLOAT5: sqlite3_str_appendall(pOut,"float5"); break;
+ case JSONB_TEXT: sqlite3_str_appendall(pOut,"text"); break;
+ case JSONB_TEXTJ: sqlite3_str_appendall(pOut,"textj"); break;
+ case JSONB_TEXT5: sqlite3_str_appendall(pOut,"text5"); break;
+ case JSONB_TEXTRAW: sqlite3_str_appendall(pOut,"textraw"); break;
+ case JSONB_ARRAY: {
+ sqlite3_str_appendf(pOut,"array, %u bytes\n", sz);
+ jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut);
+ showContent = 0;
+ break;
+ }
+ case JSONB_OBJECT: {
+ sqlite3_str_appendf(pOut, "object, %u bytes\n", sz);
+ jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut);
+ showContent = 0;
+ break;
+ }
+ default: {
+ sqlite3_str_appendall(pOut, "ERROR: unknown node type\n");
+ showContent = 0;
+ break;
+ }
+ }
+ if( showContent ){
+ if( sz==0 && x<=JSONB_FALSE ){
+ sqlite3_str_append(pOut, "\n", 1);
+ }else{
+ u32 i;
+ sqlite3_str_appendall(pOut, ": \"");
+ for(i=iStart+n; i<iStart+n+sz; i++){
+ u8 c = pParse->aBlob[i];
+ if( c<0x20 || c>=0x7f ) c = '.';
+ sqlite3_str_append(pOut, (char*)&c, 1);
+ }
+ sqlite3_str_append(pOut, "\"\n", 2);
+ }
+ }
+ iStart += n + sz;
+ }
+}
+static void jsonShowParse(JsonParse *pParse){
+ sqlite3_str out;
+ char zBuf[1000];
+ if( pParse==0 ){
+ printf("NULL pointer\n");
+ return;
+ }else{
+ printf("nBlobAlloc = %u\n", pParse->nBlobAlloc);
+ printf("nBlob = %u\n", pParse->nBlob);
+ printf("delta = %d\n", pParse->delta);
+ if( pParse->nBlob==0 ) return;
+ printf("content (bytes 0..%u):\n", pParse->nBlob-1);
}
- jsonParseReset(&x);
- jsonResult(&s);
+ sqlite3StrAccumInit(&out, 0, zBuf, sizeof(zBuf), 1000000);
+ jsonDebugPrintBlob(pParse, 0, pParse->nBlob, 0, &out);
+ printf("%s", sqlite3_str_value(&out));
+ sqlite3_str_reset(&out);
}
+#endif /* SQLITE_DEBUG */
+#ifdef SQLITE_DEBUG
/*
-** The json_test1(JSON) function return true (1) if the input is JSON
-** text generated by another json function. It returns (0) if the input
-** is not known to be JSON.
+** SQL function: json_parse(JSON)
+**
+** Parse JSON using jsonParseFuncArg(). Return text that is a
+** human-readable dump of the binary JSONB for the input parameter.
*/
-static void jsonTest1Func(
+static void jsonParseFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- UNUSED_PARAMETER(argc);
- sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE);
+ JsonParse *p; /* The parse */
+ sqlite3_str out;
+
+ assert( argc>=1 );
+ sqlite3StrAccumInit(&out, 0, 0, 0, 1000000);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
+ if( p==0 ) return;
+ if( argc==1 ){
+ jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out);
+ sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8);
+ }else{
+ jsonShowParse(p);
+ }
+ jsonParseFree(p);
}
#endif /* SQLITE_DEBUG */
@@ -201757,7 +206743,7 @@ static void jsonTest1Func(
****************************************************************************/
/*
-** Implementation of the json_QUOTE(VALUE) function. Return a JSON value
+** Implementation of the json_quote(VALUE) function. Return a JSON value
** corresponding to the SQL value input. Mostly this means putting
** double-quotes around strings and returning the unquoted string "null"
** when given a NULL input.
@@ -201770,9 +206756,9 @@ static void jsonQuoteFunc(
JsonString jx;
UNUSED_PARAMETER(argc);
- jsonInit(&jx, ctx);
- jsonAppendValue(&jx, argv[0]);
- jsonResult(&jx);
+ jsonStringInit(&jx, ctx);
+ jsonAppendSqlValue(&jx, argv[0]);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
@@ -201789,18 +206775,17 @@ static void jsonArrayFunc(
int i;
JsonString jx;
- jsonInit(&jx, ctx);
+ jsonStringInit(&jx, ctx);
jsonAppendChar(&jx, '[');
for(i=0; i<argc; i++){
jsonAppendSeparator(&jx);
- jsonAppendValue(&jx, argv[i]);
+ jsonAppendSqlValue(&jx, argv[i]);
}
jsonAppendChar(&jx, ']');
- jsonResult(&jx);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
-
/*
** json_array_length(JSON)
** json_array_length(JSON, PATH)
@@ -201814,39 +206799,53 @@ static void jsonArrayLengthFunc(
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- sqlite3_int64 n = 0;
+ sqlite3_int64 cnt = 0;
u32 i;
- JsonNode *pNode;
+ u8 eErr = 0;
- p = jsonParseCached(ctx, argv, ctx);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
- assert( p->nNode );
if( argc==2 ){
const char *zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
+ if( zPath==0 ){
+ jsonParseFree(p);
+ return;
+ }
+ i = jsonLookupStep(p, 0, zPath[0]=='$' ? zPath+1 : "@", 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ /* no-op */
+ }else if( i==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ eErr = 1;
+ i = 0;
+ }
}else{
- pNode = p->aNode;
+ i = 0;
}
- if( pNode==0 ){
- return;
+ if( (p->aBlob[i] & 0x0f)==JSONB_ARRAY ){
+ cnt = jsonbArrayCount(p, i);
}
- if( pNode->eType==JSON_ARRAY ){
- assert( (pNode->jnFlags & JNODE_APPEND)==0 );
- for(i=1; i<=pNode->n; n++){
- i += jsonNodeSize(&pNode[i]);
- }
- }
- sqlite3_result_int64(ctx, n);
+ if( !eErr ) sqlite3_result_int64(ctx, cnt);
+ jsonParseFree(p);
}
-/*
-** Bit values for the flags passed into jsonExtractFunc() or
-** jsonSetFunc() via the user-data value.
-*/
-#define JSON_JSON 0x01 /* Result is always JSON */
-#define JSON_SQL 0x02 /* Result is always SQL */
-#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */
-#define JSON_ISSET 0x04 /* json_set(), not json_insert() */
+/* True if the string is all digits */
+static int jsonAllDigits(const char *z, int n){
+ int i;
+ for(i=0; i<n && sqlite3Isdigit(z[i]); i++){}
+ return i==n;
+}
+
+/* True if the string is all alphanumerics and underscores */
+static int jsonAllAlphanum(const char *z, int n){
+ int i;
+ for(i=0; i<n && (sqlite3Isalnum(z[i]) || z[i]=='_'); i++){}
+ return i==n;
+}
/*
** json_extract(JSON, PATH, ...)
@@ -201873,159 +206872,307 @@ static void jsonExtractFunc(
int argc,
sqlite3_value **argv
){
- JsonParse *p; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
- JsonString jx;
+ JsonParse *p = 0; /* The parse */
+ int flags; /* Flags associated with the function */
+ int i; /* Loop counter */
+ JsonString jx; /* String for array result */
if( argc<2 ) return;
- p = jsonParseCached(ctx, argv, ctx);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
- if( argc==2 ){
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ jsonStringInit(&jx, ctx);
+ if( argc>2 ){
+ jsonAppendChar(&jx, '[');
+ }
+ for(i=1; i<argc; i++){
/* With a single PATH argument */
- zPath = (const char*)sqlite3_value_text(argv[1]);
- if( zPath==0 ) return;
- if( flags & JSON_ABPATH ){
- if( zPath[0]!='$' || (zPath[1]!='.' && zPath[1]!='[' && zPath[1]!=0) ){
- /* The -> and ->> operators accept abbreviated PATH arguments. This
- ** is mostly for compatibility with PostgreSQL, but also for
- ** convenience.
- **
- ** NUMBER ==> $[NUMBER] // PG compatible
- ** LABEL ==> $.LABEL // PG compatible
- ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience
- */
- jsonInit(&jx, ctx);
- if( sqlite3Isdigit(zPath[0]) ){
- jsonAppendRaw(&jx, "$[", 2);
- jsonAppendRaw(&jx, zPath, (int)strlen(zPath));
- jsonAppendRaw(&jx, "]", 2);
- }else{
- jsonAppendRaw(&jx, "$.", 1 + (zPath[0]!='['));
- jsonAppendRaw(&jx, zPath, (int)strlen(zPath));
- jsonAppendChar(&jx, 0);
- }
- pNode = jx.bErr ? 0 : jsonLookup(p, jx.zBuf, 0, ctx);
- jsonReset(&jx);
+ const char *zPath = (const char*)sqlite3_value_text(argv[i]);
+ int nPath;
+ u32 j;
+ if( zPath==0 ) goto json_extract_error;
+ nPath = sqlite3Strlen30(zPath);
+ if( zPath[0]=='$' ){
+ j = jsonLookupStep(p, 0, zPath+1, 0);
+ }else if( (flags & JSON_ABPATH) ){
+ /* The -> and ->> operators accept abbreviated PATH arguments. This
+ ** is mostly for compatibility with PostgreSQL, but also for
+ ** convenience.
+ **
+ ** NUMBER ==> $[NUMBER] // PG compatible
+ ** LABEL ==> $.LABEL // PG compatible
+ ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience
+ */
+ jsonStringInit(&jx, ctx);
+ if( jsonAllDigits(zPath, nPath) ){
+ jsonAppendRawNZ(&jx, "[", 1);
+ jsonAppendRaw(&jx, zPath, nPath);
+ jsonAppendRawNZ(&jx, "]", 2);
+ }else if( jsonAllAlphanum(zPath, nPath) ){
+ jsonAppendRawNZ(&jx, ".", 1);
+ jsonAppendRaw(&jx, zPath, nPath);
+ }else if( zPath[0]=='[' && nPath>=3 && zPath[nPath-1]==']' ){
+ jsonAppendRaw(&jx, zPath, nPath);
}else{
- pNode = jsonLookup(p, zPath, 0, ctx);
+ jsonAppendRawNZ(&jx, ".\"", 2);
+ jsonAppendRaw(&jx, zPath, nPath);
+ jsonAppendRawNZ(&jx, "\"", 1);
}
- if( pNode ){
+ jsonStringTerminate(&jx);
+ j = jsonLookupStep(p, 0, jx.zBuf, 0);
+ jsonStringReset(&jx);
+ }else{
+ jsonBadPathError(ctx, zPath);
+ goto json_extract_error;
+ }
+ if( j<p->nBlob ){
+ if( argc==2 ){
if( flags & JSON_JSON ){
- jsonReturnJson(pNode, ctx, 0);
+ jsonStringInit(&jx, ctx);
+ jsonTranslateBlobToText(p, j, &jx);
+ jsonReturnString(&jx, 0, 0);
+ jsonStringReset(&jx);
+ assert( (flags & JSON_BLOB)==0 );
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}else{
- jsonReturn(pNode, ctx, 0);
- sqlite3_result_subtype(ctx, 0);
+ jsonReturnFromBlob(p, j, ctx, 0);
+ if( (flags & (JSON_SQL|JSON_BLOB))==0
+ && (p->aBlob[j]&0x0f)>=JSONB_ARRAY
+ ){
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
}
+ }else{
+ jsonAppendSeparator(&jx);
+ jsonTranslateBlobToText(p, j, &jx);
}
- }else{
- pNode = jsonLookup(p, zPath, 0, ctx);
- if( p->nErr==0 && pNode ) jsonReturn(pNode, ctx, 0);
- }
- }else{
- /* Two or more PATH arguments results in a JSON array with each
- ** element of the array being the value selected by one of the PATHs */
- int i;
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '[');
- for(i=1; i<argc; i++){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(p, zPath, 0, ctx);
- if( p->nErr ) break;
- jsonAppendSeparator(&jx);
- if( pNode ){
- jsonRenderNode(pNode, &jx, 0);
+ }else if( j==JSON_LOOKUP_NOTFOUND ){
+ if( argc==2 ){
+ goto json_extract_error; /* Return NULL if not found */
}else{
- jsonAppendRaw(&jx, "null", 4);
+ jsonAppendSeparator(&jx);
+ jsonAppendRawNZ(&jx, "null", 4);
}
+ }else if( j==JSON_LOOKUP_ERROR ){
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ goto json_extract_error;
+ }else{
+ jsonBadPathError(ctx, zPath);
+ goto json_extract_error;
}
- if( i==argc ){
- jsonAppendChar(&jx, ']');
- jsonResult(&jx);
+ }
+ if( argc>2 ){
+ jsonAppendChar(&jx, ']');
+ jsonReturnString(&jx, 0, 0);
+ if( (flags & JSON_BLOB)==0 ){
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
- jsonReset(&jx);
}
+json_extract_error:
+ jsonStringReset(&jx);
+ jsonParseFree(p);
+ return;
}
-/* This is the RFC 7396 MergePatch algorithm.
-*/
-static JsonNode *jsonMergePatch(
- JsonParse *pParse, /* The JSON parser that contains the TARGET */
- u32 iTarget, /* Node of the TARGET in pParse */
- JsonNode *pPatch /* The PATCH */
-){
- u32 i, j;
- u32 iRoot;
- JsonNode *pTarget;
- if( pPatch->eType!=JSON_OBJECT ){
- return pPatch;
- }
- assert( iTarget<pParse->nNode );
- pTarget = &pParse->aNode[iTarget];
- assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
- if( pTarget->eType!=JSON_OBJECT ){
- jsonRemoveAllNulls(pPatch);
- return pPatch;
- }
- iRoot = iTarget;
- for(i=1; i<pPatch->n; i += jsonNodeSize(&pPatch[i+1])+1){
- u32 nKey;
- const char *zKey;
- assert( pPatch[i].eType==JSON_STRING );
- assert( pPatch[i].jnFlags & JNODE_LABEL );
- assert( pPatch[i].eU==1 );
- nKey = pPatch[i].n;
- zKey = pPatch[i].u.zJContent;
- for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){
- assert( pTarget[j].eType==JSON_STRING );
- assert( pTarget[j].jnFlags & JNODE_LABEL );
- if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){
- if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break;
- if( pPatch[i+1].eType==JSON_NULL ){
- pTarget[j+1].jnFlags |= JNODE_REMOVE;
- }else{
- JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]);
- if( pNew==0 ) return 0;
- pTarget = &pParse->aNode[iTarget];
- if( pNew!=&pTarget[j+1] ){
- assert( pTarget[j+1].eU==0
- || pTarget[j+1].eU==1
- || pTarget[j+1].eU==2 );
- testcase( pTarget[j+1].eU==1 );
- testcase( pTarget[j+1].eU==2 );
- VVA( pTarget[j+1].eU = 5 );
- pTarget[j+1].u.pPatch = pNew;
- pTarget[j+1].jnFlags |= JNODE_PATCH;
- }
- }
- break;
+/*
+** Return codes for jsonMergePatch()
+*/
+#define JSON_MERGE_OK 0 /* Success */
+#define JSON_MERGE_BADTARGET 1 /* Malformed TARGET blob */
+#define JSON_MERGE_BADPATCH 2 /* Malformed PATCH blob */
+#define JSON_MERGE_OOM 3 /* Out-of-memory condition */
+
+/*
+** RFC-7396 MergePatch for two JSONB blobs.
+**
+** pTarget is the target. pPatch is the patch. The target is updated
+** in place. The patch is read-only.
+**
+** The original RFC-7396 algorithm is this:
+**
+** define MergePatch(Target, Patch):
+** if Patch is an Object:
+** if Target is not an Object:
+** Target = {} # Ignore the contents and set it to an empty Object
+** for each Name/Value pair in Patch:
+** if Value is null:
+** if Name exists in Target:
+** remove the Name/Value pair from Target
+** else:
+** Target[Name] = MergePatch(Target[Name], Value)
+** return Target
+** else:
+** return Patch
+**
+** Here is an equivalent algorithm restructured to show the actual
+** implementation:
+**
+** 01 define MergePatch(Target, Patch):
+** 02 if Patch is not an Object:
+** 03 return Patch
+** 04 else: // if Patch is an Object
+** 05 if Target is not an Object:
+** 06 Target = {}
+** 07 for each Name/Value pair in Patch:
+** 08 if Name exists in Target:
+** 09 if Value is null:
+** 10 remove the Name/Value pair from Target
+** 11 else
+** 12 Target[name] = MergePatch(Target[Name], Value)
+** 13 else if Value is not NULL:
+** 14 if Value is not an Object:
+** 15 Target[name] = Value
+** 16 else:
+** 17 Target[name] = MergePatch('{}',value)
+** 18 return Target
+** |
+** ^---- Line numbers referenced in comments in the implementation
+*/
+static int jsonMergePatch(
+ JsonParse *pTarget, /* The JSON parser that contains the TARGET */
+ u32 iTarget, /* Index of TARGET in pTarget->aBlob[] */
+ const JsonParse *pPatch, /* The PATCH */
+ u32 iPatch /* Index of PATCH in pPatch->aBlob[] */
+){
+ u8 x; /* Type of a single node */
+ u32 n, sz=0; /* Return values from jsonbPayloadSize() */
+ u32 iTCursor; /* Cursor position while scanning the target object */
+ u32 iTStart; /* First label in the target object */
+ u32 iTEndBE; /* Original first byte past end of target, before edit */
+ u32 iTEnd; /* Current first byte past end of target */
+ u8 eTLabel; /* Node type of the target label */
+ u32 iTLabel = 0; /* Index of the label */
+ u32 nTLabel = 0; /* Header size in bytes for the target label */
+ u32 szTLabel = 0; /* Size of the target label payload */
+ u32 iTValue = 0; /* Index of the target value */
+ u32 nTValue = 0; /* Header size of the target value */
+ u32 szTValue = 0; /* Payload size for the target value */
+
+ u32 iPCursor; /* Cursor position while scanning the patch */
+ u32 iPEnd; /* First byte past the end of the patch */
+ u8 ePLabel; /* Node type of the patch label */
+ u32 iPLabel; /* Start of patch label */
+ u32 nPLabel; /* Size of header on the patch label */
+ u32 szPLabel; /* Payload size of the patch label */
+ u32 iPValue; /* Start of patch value */
+ u32 nPValue; /* Header size for the patch value */
+ u32 szPValue; /* Payload size of the patch value */
+
+ assert( iTarget>=0 && iTarget<pTarget->nBlob );
+ assert( iPatch>=0 && iPatch<pPatch->nBlob );
+ x = pPatch->aBlob[iPatch] & 0x0f;
+ if( x!=JSONB_OBJECT ){ /* Algorithm line 02 */
+ u32 szPatch; /* Total size of the patch, header+payload */
+ u32 szTarget; /* Total size of the target, header+payload */
+ n = jsonbPayloadSize(pPatch, iPatch, &sz);
+ szPatch = n+sz;
+ sz = 0;
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ szTarget = n+sz;
+ jsonBlobEdit(pTarget, iTarget, szTarget, pPatch->aBlob+iPatch, szPatch);
+ return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK; /* Line 03 */
+ }
+ x = pTarget->aBlob[iTarget] & 0x0f;
+ if( x!=JSONB_OBJECT ){ /* Algorithm line 05 */
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ jsonBlobEdit(pTarget, iTarget+n, sz, 0, 0);
+ x = pTarget->aBlob[iTarget];
+ pTarget->aBlob[iTarget] = (x & 0xf0) | JSONB_OBJECT;
+ }
+ n = jsonbPayloadSize(pPatch, iPatch, &sz);
+ if( NEVER(n==0) ) return JSON_MERGE_BADPATCH;
+ iPCursor = iPatch+n;
+ iPEnd = iPCursor+sz;
+ n = jsonbPayloadSize(pTarget, iTarget, &sz);
+ if( NEVER(n==0) ) return JSON_MERGE_BADTARGET;
+ iTStart = iTarget+n;
+ iTEndBE = iTStart+sz;
+
+ while( iPCursor<iPEnd ){ /* Algorithm line 07 */
+ iPLabel = iPCursor;
+ ePLabel = pPatch->aBlob[iPCursor] & 0x0f;
+ if( ePLabel<JSONB_TEXT || ePLabel>JSONB_TEXTRAW ){
+ return JSON_MERGE_BADPATCH;
+ }
+ nPLabel = jsonbPayloadSize(pPatch, iPCursor, &szPLabel);
+ if( nPLabel==0 ) return JSON_MERGE_BADPATCH;
+ iPValue = iPCursor + nPLabel + szPLabel;
+ if( iPValue>=iPEnd ) return JSON_MERGE_BADPATCH;
+ nPValue = jsonbPayloadSize(pPatch, iPValue, &szPValue);
+ if( nPValue==0 ) return JSON_MERGE_BADPATCH;
+ iPCursor = iPValue + nPValue + szPValue;
+ if( iPCursor>iPEnd ) return JSON_MERGE_BADPATCH;
+
+ iTCursor = iTStart;
+ iTEnd = iTEndBE + pTarget->delta;
+ while( iTCursor<iTEnd ){
+ int isEqual; /* true if the patch and target labels match */
+ iTLabel = iTCursor;
+ eTLabel = pTarget->aBlob[iTCursor] & 0x0f;
+ if( eTLabel<JSONB_TEXT || eTLabel>JSONB_TEXTRAW ){
+ return JSON_MERGE_BADTARGET;
+ }
+ nTLabel = jsonbPayloadSize(pTarget, iTCursor, &szTLabel);
+ if( nTLabel==0 ) return JSON_MERGE_BADTARGET;
+ iTValue = iTLabel + nTLabel + szTLabel;
+ if( iTValue>=iTEnd ) return JSON_MERGE_BADTARGET;
+ nTValue = jsonbPayloadSize(pTarget, iTValue, &szTValue);
+ if( nTValue==0 ) return JSON_MERGE_BADTARGET;
+ if( iTValue + nTValue + szTValue > iTEnd ) return JSON_MERGE_BADTARGET;
+ isEqual = jsonLabelCompare(
+ (const char*)&pPatch->aBlob[iPLabel+nPLabel],
+ szPLabel,
+ (ePLabel==JSONB_TEXT || ePLabel==JSONB_TEXTRAW),
+ (const char*)&pTarget->aBlob[iTLabel+nTLabel],
+ szTLabel,
+ (eTLabel==JSONB_TEXT || eTLabel==JSONB_TEXTRAW));
+ if( isEqual ) break;
+ iTCursor = iTValue + nTValue + szTValue;
+ }
+ x = pPatch->aBlob[iPValue] & 0x0f;
+ if( iTCursor<iTEnd ){
+ /* A match was found. Algorithm line 08 */
+ if( x==0 ){
+ /* Patch value is NULL. Algorithm line 09 */
+ jsonBlobEdit(pTarget, iTLabel, nTLabel+szTLabel+nTValue+szTValue, 0,0);
+ /* vvvvvv----- No OOM on a delete-only edit */
+ if( NEVER(pTarget->oom) ) return JSON_MERGE_OOM;
+ }else{
+ /* Algorithm line 12 */
+ int rc, savedDelta = pTarget->delta;
+ pTarget->delta = 0;
+ rc = jsonMergePatch(pTarget, iTValue, pPatch, iPValue);
+ if( rc ) return rc;
+ pTarget->delta += savedDelta;
+ }
+ }else if( x>0 ){ /* Algorithm line 13 */
+ /* No match and patch value is not NULL */
+ u32 szNew = szPLabel+nPLabel;
+ if( (pPatch->aBlob[iPValue] & 0x0f)!=JSONB_OBJECT ){ /* Line 14 */
+ jsonBlobEdit(pTarget, iTEnd, 0, 0, szPValue+nPValue+szNew);
+ if( pTarget->oom ) return JSON_MERGE_OOM;
+ memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew);
+ memcpy(&pTarget->aBlob[iTEnd+szNew],
+ &pPatch->aBlob[iPValue], szPValue+nPValue);
+ }else{
+ int rc, savedDelta;
+ jsonBlobEdit(pTarget, iTEnd, 0, 0, szNew+1);
+ if( pTarget->oom ) return JSON_MERGE_OOM;
+ memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew);
+ pTarget->aBlob[iTEnd+szNew] = 0x00;
+ savedDelta = pTarget->delta;
+ pTarget->delta = 0;
+ rc = jsonMergePatch(pTarget, iTEnd+szNew,pPatch,iPValue);
+ if( rc ) return rc;
+ pTarget->delta += savedDelta;
}
}
- if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){
- int iStart, iPatch;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
- if( pParse->oom ) return 0;
- jsonRemoveAllNulls(pPatch);
- pTarget = &pParse->aNode[iTarget];
- assert( pParse->aNode[iRoot].eU==0 || pParse->aNode[iRoot].eU==2 );
- testcase( pParse->aNode[iRoot].eU==2 );
- pParse->aNode[iRoot].jnFlags |= JNODE_APPEND;
- VVA( pParse->aNode[iRoot].eU = 2 );
- pParse->aNode[iRoot].u.iAppend = iStart - iRoot;
- iRoot = iStart;
- assert( pParse->aNode[iPatch].eU==0 );
- VVA( pParse->aNode[iPatch].eU = 5 );
- pParse->aNode[iPatch].jnFlags |= JNODE_PATCH;
- pParse->aNode[iPatch].u.pPatch = &pPatch[i+1];
- }
}
- return pTarget;
+ if( pTarget->delta ) jsonAfterEditSizeAdjust(pTarget, iTarget);
+ return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK;
}
+
/*
** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON
** object that is the result of running the RFC 7396 MergePatch() algorithm
@@ -202036,25 +207183,27 @@ static void jsonPatchFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The JSON that is being patched */
- JsonParse y; /* The patch */
- JsonNode *pResult; /* The result of the merge */
+ JsonParse *pTarget; /* The TARGET */
+ JsonParse *pPatch; /* The PATCH */
+ int rc; /* Result code */
UNUSED_PARAMETER(argc);
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){
- jsonParseReset(&x);
- return;
- }
- pResult = jsonMergePatch(&x, 0, y.aNode);
- assert( pResult!=0 || x.oom );
- if( pResult ){
- jsonReturnJson(pResult, ctx, 0);
- }else{
- sqlite3_result_error_nomem(ctx);
+ assert( argc==2 );
+ pTarget = jsonParseFuncArg(ctx, argv[0], JSON_EDITABLE);
+ if( pTarget==0 ) return;
+ pPatch = jsonParseFuncArg(ctx, argv[1], 0);
+ if( pPatch ){
+ rc = jsonMergePatch(pTarget, 0, pPatch, 0);
+ if( rc==JSON_MERGE_OK ){
+ jsonReturnParse(ctx, pTarget);
+ }else if( rc==JSON_MERGE_OOM ){
+ sqlite3_result_error_nomem(ctx);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ jsonParseFree(pPatch);
}
- jsonParseReset(&x);
- jsonParseReset(&y);
+ jsonParseFree(pTarget);
}
@@ -202078,23 +207227,23 @@ static void jsonObjectFunc(
"of arguments", -1);
return;
}
- jsonInit(&jx, ctx);
+ jsonStringInit(&jx, ctx);
jsonAppendChar(&jx, '{');
for(i=0; i<argc; i+=2){
if( sqlite3_value_type(argv[i])!=SQLITE_TEXT ){
sqlite3_result_error(ctx, "json_object() labels must be TEXT", -1);
- jsonReset(&jx);
+ jsonStringReset(&jx);
return;
}
jsonAppendSeparator(&jx);
z = (const char*)sqlite3_value_text(argv[i]);
- n = (u32)sqlite3_value_bytes(argv[i]);
+ n = sqlite3_value_bytes(argv[i]);
jsonAppendString(&jx, z, n);
jsonAppendChar(&jx, ':');
- jsonAppendValue(&jx, argv[i+1]);
+ jsonAppendSqlValue(&jx, argv[i+1]);
}
jsonAppendChar(&jx, '}');
- jsonResult(&jx);
+ jsonReturnString(&jx, 0, 0);
sqlite3_result_subtype(ctx, JSON_SUBTYPE);
}
@@ -202110,26 +207259,50 @@ static void jsonRemoveFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
+ JsonParse *p; /* The parse */
+ const char *zPath = 0; /* Path of element to be removed */
+ int i; /* Loop counter */
+ u32 rc; /* Subroutine return code */
if( argc<1 ) return;
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i++){
+ p = jsonParseFuncArg(ctx, argv[0], argc>1 ? JSON_EDITABLE : 0);
+ if( p==0 ) return;
+ for(i=1; i<argc; i++){
zPath = (const char*)sqlite3_value_text(argv[i]);
- if( zPath==0 ) goto remove_done;
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto remove_done;
- if( pNode ) pNode->jnFlags |= JNODE_REMOVE;
- }
- if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){
- jsonReturnJson(x.aNode, ctx, 0);
+ if( zPath==0 ){
+ goto json_remove_done;
+ }
+ if( zPath[0]!='$' ){
+ goto json_remove_patherror;
+ }
+ if( zPath[1]==0 ){
+ /* json_remove(j,'$') returns NULL */
+ goto json_remove_done;
+ }
+ p->eEdit = JEDIT_DEL;
+ p->delta = 0;
+ rc = jsonLookupStep(p, 0, zPath+1, 0);
+ if( JSON_LOOKUP_ISERROR(rc) ){
+ if( rc==JSON_LOOKUP_NOTFOUND ){
+ continue; /* No-op */
+ }else if( rc==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ goto json_remove_done;
+ }
}
-remove_done:
- jsonParseReset(&x);
+ jsonReturnParse(ctx, p);
+ jsonParseFree(p);
+ return;
+
+json_remove_patherror:
+ jsonBadPathError(ctx, zPath);
+
+json_remove_done:
+ jsonParseFree(p);
+ return;
}
/*
@@ -202143,38 +207316,12 @@ static void jsonReplaceFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
if( argc<1 ) return;
if( (argc&1)==0 ) {
jsonWrongNumArgs(ctx, "replace");
return;
}
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto replace_err;
- if( pNode ){
- assert( pNode->eU==0 || pNode->eU==1 || pNode->eU==4 );
- testcase( pNode->eU!=0 && pNode->eU!=1 );
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- VVA( pNode->eU = 4 );
- pNode->u.iReplace = i + 1;
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- assert( x.aNode[0].eU==4 );
- sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-replace_err:
- jsonParseReset(&x);
+ jsonInsertIntoBlob(ctx, argc, argv, JEDIT_REPL);
}
@@ -202195,45 +207342,16 @@ static void jsonSetFunc(
int argc,
sqlite3_value **argv
){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
- int bApnd;
- int bIsSet = sqlite3_user_data(ctx)!=0;
+
+ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ int bIsSet = (flags&JSON_ISSET)!=0;
if( argc<1 ) return;
if( (argc&1)==0 ) {
jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
return;
}
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- bApnd = 0;
- pNode = jsonLookup(&x, zPath, &bApnd, ctx);
- if( x.oom ){
- sqlite3_result_error_nomem(ctx);
- goto jsonSetDone;
- }else if( x.nErr ){
- goto jsonSetDone;
- }else if( pNode && (bApnd || bIsSet) ){
- testcase( pNode->eU!=0 && pNode->eU!=1 );
- assert( pNode->eU!=3 && pNode->eU!=5 );
- VVA( pNode->eU = 4 );
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- pNode->u.iReplace = i + 1;
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- assert( x.aNode[0].eU==4 );
- sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-jsonSetDone:
- jsonParseReset(&x);
+ jsonInsertIntoBlob(ctx, argc, argv, bIsSet ? JEDIT_SET : JEDIT_INS);
}
/*
@@ -202249,27 +207367,93 @@ static void jsonTypeFunc(
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- const char *zPath;
- JsonNode *pNode;
+ const char *zPath = 0;
+ u32 i;
- p = jsonParseCached(ctx, argv, ctx);
+ p = jsonParseFuncArg(ctx, argv[0], 0);
if( p==0 ) return;
if( argc==2 ){
zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
+ if( zPath==0 ) goto json_type_done;
+ if( zPath[0]!='$' ){
+ jsonBadPathError(ctx, zPath);
+ goto json_type_done;
+ }
+ i = jsonLookupStep(p, 0, zPath+1, 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ /* no-op */
+ }else if( i==JSON_LOOKUP_PATHERROR ){
+ jsonBadPathError(ctx, zPath);
+ }else{
+ sqlite3_result_error(ctx, "malformed JSON", -1);
+ }
+ goto json_type_done;
+ }
}else{
- pNode = p->aNode;
- }
- if( pNode ){
- sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
+ i = 0;
}
+ sqlite3_result_text(ctx, jsonbType[p->aBlob[i]&0x0f], -1, SQLITE_STATIC);
+json_type_done:
+ jsonParseFree(p);
}
/*
** json_valid(JSON)
-**
-** Return 1 if JSON is a well-formed canonical JSON string according
-** to RFC-7159. Return 0 otherwise.
+** json_valid(JSON, FLAGS)
+**
+** Check the JSON argument to see if it is well-formed. The FLAGS argument
+** encodes the various constraints on what is meant by "well-formed":
+**
+** 0x01 Canonical RFC-8259 JSON text
+** 0x02 JSON text with optional JSON-5 extensions
+** 0x04 Superficially appears to be JSONB
+** 0x08 Strictly well-formed JSONB
+**
+** If the FLAGS argument is omitted, it defaults to 1. Useful values for
+** FLAGS include:
+**
+** 1 Strict canonical JSON text
+** 2 JSON text perhaps with JSON-5 extensions
+** 4 Superficially appears to be JSONB
+** 5 Canonical JSON text or superficial JSONB
+** 6 JSON-5 text or superficial JSONB
+** 8 Strict JSONB
+** 9 Canonical JSON text or strict JSONB
+** 10 JSON-5 text or strict JSONB
+**
+** Other flag combinations are redundant. For example, every canonical
+** JSON text is also well-formed JSON-5 text, so FLAG values 2 and 3
+** are the same. Similarly, any input that passes a strict JSONB validation
+** will also pass the superficial validation so 12 through 15 are the same
+** as 8 through 11 respectively.
+**
+** This routine runs in linear time to validate text and when doing strict
+** JSONB validation. Superficial JSONB validation is constant time,
+** assuming the BLOB is already in memory. The performance advantage
+** of superficial JSONB validation is why that option is provided.
+** Application developers can choose to do fast superficial validation or
+** slower strict validation, according to their specific needs.
+**
+** Only the lower four bits of the FLAGS argument are currently used.
+** Higher bits are reserved for future expansion. To facilitate
+** compatibility, the current implementation raises an error if any bit
+** in FLAGS is set other than the lower four bits.
+**
+** The original circa 2015 implementation of the JSON routines in
+** SQLite only supported canonical RFC-8259 JSON text and the json_valid()
+** function only accepted one argument. That is why the default value
+** for the FLAGS argument is 1, since FLAGS=1 causes this routine to only
+** recognize canonical RFC-8259 JSON text as valid. The extra FLAGS
+** argument was added when the JSON routines were extended to support
+** JSON5-like extensions and binary JSONB stored in BLOBs.
+**
+** Return Values:
+**
+** * Raise an error if FLAGS is outside the range of 1 to 15.
+** * Return NULL if the input is NULL
+** * Return 1 if the input is well-formed.
+** * Return 0 if the input is not well-formed.
*/
static void jsonValidFunc(
sqlite3_context *ctx,
@@ -202277,73 +207461,128 @@ static void jsonValidFunc(
sqlite3_value **argv
){
JsonParse *p; /* The parse */
- UNUSED_PARAMETER(argc);
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
- p = jsonParseCached(ctx, argv, 0);
- if( p==0 || p->oom ){
- sqlite3_result_error_nomem(ctx);
- sqlite3_free(p);
- }else{
- sqlite3_result_int(ctx, p->nErr==0 && p->hasNonstd==0);
- if( p->nErr ) jsonParseFree(p);
+ u8 flags = 1;
+ u8 res = 0;
+ if( argc==2 ){
+ i64 f = sqlite3_value_int64(argv[1]);
+ if( f<1 || f>15 ){
+ sqlite3_result_error(ctx, "FLAGS parameter to json_valid() must be"
+ " between 1 and 15", -1);
+ return;
+ }
+ flags = f & 0x0f;
+ }
+ switch( sqlite3_value_type(argv[0]) ){
+ case SQLITE_NULL: {
+#ifdef SQLITE_LEGACY_JSON_VALID
+ /* Incorrect legacy behavior was to return FALSE for a NULL input */
+ sqlite3_result_int(ctx, 0);
+#endif
+ return;
+ }
+ case SQLITE_BLOB: {
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ if( flags & 0x04 ){
+ /* Superficial checking only - accomplished by the
+ ** jsonFuncArgMightBeBinary() call above. */
+ res = 1;
+ }else if( flags & 0x08 ){
+ /* Strict checking. Check by translating BLOB->TEXT->BLOB. If
+ ** no errors occur, call that a "strict check". */
+ JsonParse px;
+ u32 iErr;
+ memset(&px, 0, sizeof(px));
+ px.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ px.nBlob = sqlite3_value_bytes(argv[0]);
+ iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1);
+ res = iErr==0;
+ }
+ break;
+ }
+ /* Fall through into interpreting the input as text. See note
+ ** above at tag-20240123-a. */
+ /* no break */ deliberate_fall_through
+ }
+ default: {
+ JsonParse px;
+ if( (flags & 0x3)==0 ) break;
+ memset(&px, 0, sizeof(px));
+
+ p = jsonParseFuncArg(ctx, argv[0], JSON_KEEPERROR);
+ if( p ){
+ if( p->oom ){
+ sqlite3_result_error_nomem(ctx);
+ }else if( p->nErr ){
+ /* no-op */
+ }else if( (flags & 0x02)!=0 || p->hasNonstd==0 ){
+ res = 1;
+ }
+ jsonParseFree(p);
+ }else{
+ sqlite3_result_error_nomem(ctx);
+ }
+ break;
+ }
}
+ sqlite3_result_int(ctx, res);
}
/*
** json_error_position(JSON)
**
-** If the argument is not an interpretable JSON string, then return the 1-based
-** character position at which the parser first recognized that the input
-** was in error. The left-most character is 1. If the string is valid
-** JSON, then return 0.
-**
-** Note that json_valid() is only true for strictly conforming canonical JSON.
-** But this routine returns zero if the input contains extension. Thus:
+** If the argument is NULL, return NULL
**
-** (1) If the input X is strictly conforming canonical JSON:
+** If the argument is BLOB, do a full validity check and return non-zero
+** if the check fails. The return value is the approximate 1-based offset
+** to the byte of the element that contains the first error.
**
-** json_valid(X) returns true
-** json_error_position(X) returns 0
-**
-** (2) If the input X is JSON but it includes extension (such as JSON5) that
-** are not part of RFC-8259:
-**
-** json_valid(X) returns false
-** json_error_position(X) return 0
-**
-** (3) If the input X cannot be interpreted as JSON even taking extensions
-** into account:
-**
-** json_valid(X) return false
-** json_error_position(X) returns 1 or more
+** Otherwise interpret the argument is TEXT (even if it is numeric) and
+** return the 1-based character position for where the parser first recognized
+** that the input was not valid JSON, or return 0 if the input text looks
+** ok. JSON-5 extensions are accepted.
*/
static void jsonErrorFunc(
sqlite3_context *ctx,
int argc,
sqlite3_value **argv
){
- JsonParse *p; /* The parse */
+ i64 iErrPos = 0; /* Error position to be returned */
+ JsonParse s;
+
+ assert( argc==1 );
UNUSED_PARAMETER(argc);
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
- p = jsonParseCached(ctx, argv, 0);
- if( p==0 || p->oom ){
+ memset(&s, 0, sizeof(s));
+ s.db = sqlite3_context_db_handle(ctx);
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ s.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ s.nBlob = sqlite3_value_bytes(argv[0]);
+ iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1);
+ }else{
+ s.zJson = (char*)sqlite3_value_text(argv[0]);
+ if( s.zJson==0 ) return; /* NULL input or OOM */
+ s.nJson = sqlite3_value_bytes(argv[0]);
+ if( jsonConvertTextToBlob(&s,0) ){
+ if( s.oom ){
+ iErrPos = -1;
+ }else{
+ /* Convert byte-offset s.iErr into a character offset */
+ u32 k;
+ assert( s.zJson!=0 ); /* Because s.oom is false */
+ for(k=0; k<s.iErr && ALWAYS(s.zJson[k]); k++){
+ if( (s.zJson[k] & 0xc0)!=0x80 ) iErrPos++;
+ }
+ iErrPos++;
+ }
+ }
+ }
+ jsonParseReset(&s);
+ if( iErrPos<0 ){
sqlite3_result_error_nomem(ctx);
- sqlite3_free(p);
- }else if( p->nErr==0 ){
- sqlite3_result_int(ctx, 0);
}else{
- int n = 1;
- u32 i;
- const char *z = p->zJson;
- for(i=0; i<p->iErr && ALWAYS(z[i]); i++){
- if( (z[i]&0xc0)!=0x80 ) n++;
- }
- sqlite3_result_int(ctx, n);
- jsonParseFree(p);
+ sqlite3_result_int64(ctx, iErrPos);
}
}
-
/****************************************************************************
** Aggregate SQL function implementations
****************************************************************************/
@@ -202362,31 +207601,42 @@ static void jsonArrayStep(
pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
if( pStr ){
if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
+ jsonStringInit(pStr, ctx);
jsonAppendChar(pStr, '[');
}else if( pStr->nUsed>1 ){
jsonAppendChar(pStr, ',');
}
pStr->pCtx = ctx;
- jsonAppendValue(pStr, argv[0]);
+ jsonAppendSqlValue(pStr, argv[0]);
}
}
static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){
JsonString *pStr;
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
if( pStr ){
+ int flags;
pStr->pCtx = ctx;
jsonAppendChar(pStr, ']');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( pStr->eErr ){
+ jsonReturnString(pStr, 0, 0);
+ return;
+ }else if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(pStr);
+ if( isFinal ){
+ if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf);
+ }else{
+ jsonStringTrimOneChar(pStr);
+ }
+ return;
}else if( isFinal ){
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
- pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic ? SQLITE_TRANSIENT :
+ sqlite3RCStrUnref);
pStr->bStatic = 1;
}else{
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
+ jsonStringTrimOneChar(pStr);
}
}else{
sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC);
@@ -202423,7 +207673,7 @@ static void jsonGroupInverse(
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
#ifdef NEVER
/* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will
- ** always have been called to initalize it */
+ ** always have been called to initialize it */
if( NEVER(!pStr) ) return;
#endif
z = pStr->zBuf;
@@ -202467,34 +207717,46 @@ static void jsonObjectStep(
pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
if( pStr ){
if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
+ jsonStringInit(pStr, ctx);
jsonAppendChar(pStr, '{');
}else if( pStr->nUsed>1 ){
jsonAppendChar(pStr, ',');
}
pStr->pCtx = ctx;
z = (const char*)sqlite3_value_text(argv[0]);
- n = (u32)sqlite3_value_bytes(argv[0]);
+ n = sqlite3Strlen30(z);
jsonAppendString(pStr, z, n);
jsonAppendChar(pStr, ':');
- jsonAppendValue(pStr, argv[1]);
+ jsonAppendSqlValue(pStr, argv[1]);
}
}
static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){
JsonString *pStr;
pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
if( pStr ){
+ int flags;
jsonAppendChar(pStr, '}');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
+ pStr->pCtx = ctx;
+ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));
+ if( pStr->eErr ){
+ jsonReturnString(pStr, 0, 0);
+ return;
+ }else if( flags & JSON_BLOB ){
+ jsonReturnStringAsBlob(pStr);
+ if( isFinal ){
+ if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf);
+ }else{
+ jsonStringTrimOneChar(pStr);
+ }
+ return;
}else if( isFinal ){
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
- pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic ? SQLITE_TRANSIENT :
+ sqlite3RCStrUnref);
pStr->bStatic = 1;
}else{
sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
+ jsonStringTrimOneChar(pStr);
}
}else{
sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC);
@@ -202514,19 +207776,37 @@ static void jsonObjectFinal(sqlite3_context *ctx){
/****************************************************************************
** The json_each virtual table
****************************************************************************/
+typedef struct JsonParent JsonParent;
+struct JsonParent {
+ u32 iHead; /* Start of object or array */
+ u32 iValue; /* Start of the value */
+ u32 iEnd; /* First byte past the end */
+ u32 nPath; /* Length of path */
+ i64 iKey; /* Key for JSONB_ARRAY */
+};
+
typedef struct JsonEachCursor JsonEachCursor;
struct JsonEachCursor {
sqlite3_vtab_cursor base; /* Base class - must be first */
u32 iRowid; /* The rowid */
- u32 iBegin; /* The first node of the scan */
- u32 i; /* Index in sParse.aNode[] of current row */
+ u32 i; /* Index in sParse.aBlob[] of current row */
u32 iEnd; /* EOF when i equals or exceeds this value */
- u8 eType; /* Type of top-level element */
+ u32 nRoot; /* Size of the root path in bytes */
+ u8 eType; /* Type of the container for element i */
u8 bRecursive; /* True for json_tree(). False for json_each() */
- char *zJson; /* Input JSON */
- char *zRoot; /* Path by which to filter zJson */
+ u32 nParent; /* Current nesting depth */
+ u32 nParentAlloc; /* Space allocated for aParent[] */
+ JsonParent *aParent; /* Parent elements of i */
+ sqlite3 *db; /* Database connection */
+ JsonString path; /* Current path */
JsonParse sParse; /* Parse of the input JSON */
};
+typedef struct JsonEachConnection JsonEachConnection;
+struct JsonEachConnection {
+ sqlite3_vtab base; /* Base class - must be first */
+ sqlite3 *db; /* Database connection */
+};
+
/* Constructor for the json_each virtual table */
static int jsonEachConnect(
@@ -202536,7 +207816,7 @@ static int jsonEachConnect(
sqlite3_vtab **ppVtab,
char **pzErr
){
- sqlite3_vtab *pNew;
+ JsonEachConnection *pNew;
int rc;
/* Column numbers */
@@ -202562,28 +207842,32 @@ static int jsonEachConnect(
"CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
"json HIDDEN,root HIDDEN)");
if( rc==SQLITE_OK ){
- pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
+ pNew = (JsonEachConnection*)sqlite3DbMallocZero(db, sizeof(*pNew));
+ *ppVtab = (sqlite3_vtab*)pNew;
if( pNew==0 ) return SQLITE_NOMEM;
- memset(pNew, 0, sizeof(*pNew));
sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+ pNew->db = db;
}
return rc;
}
/* destructor for json_each virtual table */
static int jsonEachDisconnect(sqlite3_vtab *pVtab){
- sqlite3_free(pVtab);
+ JsonEachConnection *p = (JsonEachConnection*)pVtab;
+ sqlite3DbFree(p->db, pVtab);
return SQLITE_OK;
}
/* constructor for a JsonEachCursor object for json_each(). */
static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ JsonEachConnection *pVtab = (JsonEachConnection*)p;
JsonEachCursor *pCur;
UNUSED_PARAMETER(p);
- pCur = sqlite3_malloc( sizeof(*pCur) );
+ pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur));
if( pCur==0 ) return SQLITE_NOMEM;
- memset(pCur, 0, sizeof(*pCur));
+ pCur->db = pVtab->db;
+ jsonStringZero(&pCur->path);
*ppCursor = &pCur->base;
return SQLITE_OK;
}
@@ -202601,22 +207885,24 @@ static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
/* Reset a JsonEachCursor back to its original state. Free any memory
** held. */
static void jsonEachCursorReset(JsonEachCursor *p){
- sqlite3_free(p->zJson);
- sqlite3_free(p->zRoot);
jsonParseReset(&p->sParse);
+ jsonStringReset(&p->path);
+ sqlite3DbFree(p->db, p->aParent);
p->iRowid = 0;
p->i = 0;
+ p->aParent = 0;
+ p->nParent = 0;
+ p->nParentAlloc = 0;
p->iEnd = 0;
p->eType = 0;
- p->zJson = 0;
- p->zRoot = 0;
}
/* Destructor for a jsonEachCursor object */
static int jsonEachClose(sqlite3_vtab_cursor *cur){
JsonEachCursor *p = (JsonEachCursor*)cur;
jsonEachCursorReset(p);
- sqlite3_free(cur);
+
+ sqlite3DbFree(p->db, cur);
return SQLITE_OK;
}
@@ -202627,200 +207913,230 @@ static int jsonEachEof(sqlite3_vtab_cursor *cur){
return p->i >= p->iEnd;
}
-/* Advance the cursor to the next element for json_tree() */
-static int jsonEachNext(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- if( p->bRecursive ){
- if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
- p->i++;
- p->iRowid++;
- if( p->i<p->iEnd ){
- u32 iUp = p->sParse.aUp[p->i];
- JsonNode *pUp = &p->sParse.aNode[iUp];
- p->eType = pUp->eType;
- if( pUp->eType==JSON_ARRAY ){
- assert( pUp->eU==0 || pUp->eU==3 );
- testcase( pUp->eU==3 );
- VVA( pUp->eU = 3 );
- if( iUp==p->i-1 ){
- pUp->u.iKey = 0;
- }else{
- pUp->u.iKey++;
+/*
+** If the cursor is currently pointing at the label of a object entry,
+** then return the index of the value. For all other cases, return the
+** current pointer position, which is the value.
+*/
+static int jsonSkipLabel(JsonEachCursor *p){
+ if( p->eType==JSONB_OBJECT ){
+ u32 sz = 0;
+ u32 n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+ return p->i + n + sz;
+ }else{
+ return p->i;
+ }
+}
+
+/*
+** Append the path name for the current element.
+*/
+static void jsonAppendPathName(JsonEachCursor *p){
+ assert( p->nParent>0 );
+ assert( p->eType==JSONB_ARRAY || p->eType==JSONB_OBJECT );
+ if( p->eType==JSONB_ARRAY ){
+ jsonPrintf(30, &p->path, "[%lld]", p->aParent[p->nParent-1].iKey);
+ }else{
+ u32 n, sz = 0, k, i;
+ const char *z;
+ int needQuote = 0;
+ n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+ k = p->i + n;
+ z = (const char*)&p->sParse.aBlob[k];
+ if( sz==0 || !sqlite3Isalpha(z[0]) ){
+ needQuote = 1;
+ }else{
+ for(i=0; i<sz; i++){
+ if( !sqlite3Isalnum(z[i]) ){
+ needQuote = 1;
+ break;
}
}
}
- }else{
- switch( p->eType ){
- case JSON_ARRAY: {
- p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
- p->iRowid++;
- break;
- }
- case JSON_OBJECT: {
- p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
- p->iRowid++;
- break;
- }
- default: {
- p->i = p->iEnd;
- break;
- }
+ if( needQuote ){
+ jsonPrintf(sz+4,&p->path,".\"%.*s\"", sz, z);
+ }else{
+ jsonPrintf(sz+2,&p->path,".%.*s", sz, z);
}
}
- return SQLITE_OK;
}
-/* Append an object label to the JSON Path being constructed
-** in pStr.
-*/
-static void jsonAppendObjectPathElement(
- JsonString *pStr,
- JsonNode *pNode
-){
- int jj, nn;
- const char *z;
- assert( pNode->eType==JSON_STRING );
- assert( pNode->jnFlags & JNODE_LABEL );
- assert( pNode->eU==1 );
- z = pNode->u.zJContent;
- nn = pNode->n;
- if( (pNode->jnFlags & JNODE_RAW)==0 ){
- assert( nn>=2 );
- assert( z[0]=='"' || z[0]=='\'' );
- assert( z[nn-1]=='"' || z[0]=='\'' );
- if( nn>2 && sqlite3Isalpha(z[1]) ){
- for(jj=2; jj<nn-1 && sqlite3Isalnum(z[jj]); jj++){}
- if( jj==nn-1 ){
- z++;
- nn -= 2;
+/* Advance the cursor to the next element for json_tree() */
+static int jsonEachNext(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ int rc = SQLITE_OK;
+ if( p->bRecursive ){
+ u8 x;
+ u8 levelChange = 0;
+ u32 n, sz = 0;
+ u32 i = jsonSkipLabel(p);
+ x = p->sParse.aBlob[i] & 0x0f;
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ if( x==JSONB_OBJECT || x==JSONB_ARRAY ){
+ JsonParent *pParent;
+ if( p->nParent>=p->nParentAlloc ){
+ JsonParent *pNew;
+ u64 nNew;
+ nNew = p->nParentAlloc*2 + 3;
+ pNew = sqlite3DbRealloc(p->db, p->aParent, sizeof(JsonParent)*nNew);
+ if( pNew==0 ) return SQLITE_NOMEM;
+ p->nParentAlloc = (u32)nNew;
+ p->aParent = pNew;
+ }
+ levelChange = 1;
+ pParent = &p->aParent[p->nParent];
+ pParent->iHead = p->i;
+ pParent->iValue = i;
+ pParent->iEnd = i + n + sz;
+ pParent->iKey = -1;
+ pParent->nPath = (u32)p->path.nUsed;
+ if( p->eType && p->nParent ){
+ jsonAppendPathName(p);
+ if( p->path.eErr ) rc = SQLITE_NOMEM;
+ }
+ p->nParent++;
+ p->i = i + n;
+ }else{
+ p->i = i + n + sz;
+ }
+ while( p->nParent>0 && p->i >= p->aParent[p->nParent-1].iEnd ){
+ p->nParent--;
+ p->path.nUsed = p->aParent[p->nParent].nPath;
+ levelChange = 1;
+ }
+ if( levelChange ){
+ if( p->nParent>0 ){
+ JsonParent *pParent = &p->aParent[p->nParent-1];
+ u32 iVal = pParent->iValue;
+ p->eType = p->sParse.aBlob[iVal] & 0x0f;
+ }else{
+ p->eType = 0;
}
}
+ }else{
+ u32 n, sz = 0;
+ u32 i = jsonSkipLabel(p);
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ p->i = i + n + sz;
+ }
+ if( p->eType==JSONB_ARRAY && p->nParent ){
+ p->aParent[p->nParent-1].iKey++;
}
- jsonPrintf(nn+2, pStr, ".%.*s", nn, z);
+ p->iRowid++;
+ return rc;
}
-/* Append the name of the path for element i to pStr
+/* Length of the path for rowid==0 in bRecursive mode.
*/
-static void jsonEachComputePath(
- JsonEachCursor *p, /* The cursor */
- JsonString *pStr, /* Write the path here */
- u32 i /* Path to this element */
-){
- JsonNode *pNode, *pUp;
- u32 iUp;
- if( i==0 ){
- jsonAppendChar(pStr, '$');
- return;
- }
- iUp = p->sParse.aUp[i];
- jsonEachComputePath(p, pStr, iUp);
- pNode = &p->sParse.aNode[i];
- pUp = &p->sParse.aNode[iUp];
- if( pUp->eType==JSON_ARRAY ){
- assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) );
- testcase( pUp->eU==0 );
- jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
- }else{
- assert( pUp->eType==JSON_OBJECT );
- if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
- jsonAppendObjectPathElement(pStr, pNode);
+static int jsonEachPathLength(JsonEachCursor *p){
+ u32 n = p->path.nUsed;
+ char *z = p->path.zBuf;
+ if( p->iRowid==0 && p->bRecursive && n>=2 ){
+ while( n>1 ){
+ n--;
+ if( z[n]=='[' || z[n]=='.' ){
+ u32 x, sz = 0;
+ char cSaved = z[n];
+ z[n] = 0;
+ assert( p->sParse.eEdit==0 );
+ x = jsonLookupStep(&p->sParse, 0, z+1, 0);
+ z[n] = cSaved;
+ if( JSON_LOOKUP_ISERROR(x) ) continue;
+ if( x + jsonbPayloadSize(&p->sParse, x, &sz) == p->i ) break;
+ }
+ }
}
+ return n;
}
/* Return the value of a column */
static int jsonEachColumn(
sqlite3_vtab_cursor *cur, /* The cursor */
sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
- int i /* Which column to return */
+ int iColumn /* Which column to return */
){
JsonEachCursor *p = (JsonEachCursor*)cur;
- JsonNode *pThis = &p->sParse.aNode[p->i];
- switch( i ){
+ switch( iColumn ){
case JEACH_KEY: {
- if( p->i==0 ) break;
- if( p->eType==JSON_OBJECT ){
- jsonReturn(pThis, ctx, 0);
- }else if( p->eType==JSON_ARRAY ){
- u32 iKey;
- if( p->bRecursive ){
- if( p->iRowid==0 ) break;
- assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 );
- iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
+ if( p->nParent==0 ){
+ u32 n, j;
+ if( p->nRoot==1 ) break;
+ j = jsonEachPathLength(p);
+ n = p->nRoot - j;
+ if( n==0 ){
+ break;
+ }else if( p->path.zBuf[j]=='[' ){
+ i64 x;
+ sqlite3Atoi64(&p->path.zBuf[j+1], &x, n-1, SQLITE_UTF8);
+ sqlite3_result_int64(ctx, x);
+ }else if( p->path.zBuf[j+1]=='"' ){
+ sqlite3_result_text(ctx, &p->path.zBuf[j+2], n-3, SQLITE_TRANSIENT);
}else{
- iKey = p->iRowid;
+ sqlite3_result_text(ctx, &p->path.zBuf[j+1], n-1, SQLITE_TRANSIENT);
}
- sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
+ break;
+ }
+ if( p->eType==JSONB_OBJECT ){
+ jsonReturnFromBlob(&p->sParse, p->i, ctx, 1);
+ }else{
+ assert( p->eType==JSONB_ARRAY );
+ sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iKey);
}
break;
}
case JEACH_VALUE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- jsonReturn(pThis, ctx, 0);
+ u32 i = jsonSkipLabel(p);
+ jsonReturnFromBlob(&p->sParse, i, ctx, 1);
break;
}
case JEACH_TYPE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
+ u32 i = jsonSkipLabel(p);
+ u8 eType = p->sParse.aBlob[i] & 0x0f;
+ sqlite3_result_text(ctx, jsonbType[eType], -1, SQLITE_STATIC);
break;
}
case JEACH_ATOM: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- if( pThis->eType>=JSON_ARRAY ) break;
- jsonReturn(pThis, ctx, 0);
+ u32 i = jsonSkipLabel(p);
+ if( (p->sParse.aBlob[i] & 0x0f)<JSONB_ARRAY ){
+ jsonReturnFromBlob(&p->sParse, i, ctx, 1);
+ }
break;
}
case JEACH_ID: {
- sqlite3_result_int64(ctx,
- (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
+ sqlite3_result_int64(ctx, (sqlite3_int64)p->i);
break;
}
case JEACH_PARENT: {
- if( p->i>p->iBegin && p->bRecursive ){
- sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
+ if( p->nParent>0 && p->bRecursive ){
+ sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iHead);
}
break;
}
case JEACH_FULLKEY: {
- JsonString x;
- jsonInit(&x, ctx);
- if( p->bRecursive ){
- jsonEachComputePath(p, &x, p->i);
- }else{
- if( p->zRoot ){
- jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
- }else{
- jsonAppendChar(&x, '$');
- }
- if( p->eType==JSON_ARRAY ){
- jsonPrintf(30, &x, "[%d]", p->iRowid);
- }else if( p->eType==JSON_OBJECT ){
- jsonAppendObjectPathElement(&x, pThis);
- }
- }
- jsonResult(&x);
+ u64 nBase = p->path.nUsed;
+ if( p->nParent ) jsonAppendPathName(p);
+ sqlite3_result_text64(ctx, p->path.zBuf, p->path.nUsed,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ p->path.nUsed = nBase;
break;
}
case JEACH_PATH: {
- if( p->bRecursive ){
- JsonString x;
- jsonInit(&x, ctx);
- jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
- jsonResult(&x);
- break;
- }
- /* For json_each() path and root are the same so fall through
- ** into the root case */
- /* no break */ deliberate_fall_through
+ u32 n = jsonEachPathLength(p);
+ sqlite3_result_text64(ctx, p->path.zBuf, n,
+ SQLITE_TRANSIENT, SQLITE_UTF8);
+ break;
}
default: {
- const char *zRoot = p->zRoot;
- if( zRoot==0 ) zRoot = "$";
- sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
+ sqlite3_result_text(ctx, p->path.zBuf, p->nRoot, SQLITE_STATIC);
break;
}
case JEACH_JSON: {
- assert( i==JEACH_JSON );
- sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ if( p->sParse.zJson==0 ){
+ sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob,
+ SQLITE_STATIC);
+ }else{
+ sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ }
break;
}
}
@@ -202911,78 +208227,97 @@ static int jsonEachFilter(
int argc, sqlite3_value **argv
){
JsonEachCursor *p = (JsonEachCursor*)cur;
- const char *z;
const char *zRoot = 0;
- sqlite3_int64 n;
+ u32 i, n, sz;
UNUSED_PARAMETER(idxStr);
UNUSED_PARAMETER(argc);
jsonEachCursorReset(p);
if( idxNum==0 ) return SQLITE_OK;
- z = (const char*)sqlite3_value_text(argv[0]);
- if( z==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[0]);
- p->zJson = sqlite3_malloc64( n+1 );
- if( p->zJson==0 ) return SQLITE_NOMEM;
- memcpy(p->zJson, z, (size_t)n+1);
- if( jsonParse(&p->sParse, 0, p->zJson) ){
- int rc = SQLITE_NOMEM;
- if( p->sParse.oom==0 ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
- if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
+ memset(&p->sParse, 0, sizeof(p->sParse));
+ p->sParse.nJPRef = 1;
+ p->sParse.db = p->db;
+ if( jsonFuncArgMightBeBinary(argv[0]) ){
+ p->sParse.nBlob = sqlite3_value_bytes(argv[0]);
+ p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]);
+ }else{
+ p->sParse.zJson = (char*)sqlite3_value_text(argv[0]);
+ p->sParse.nJson = sqlite3_value_bytes(argv[0]);
+ if( p->sParse.zJson==0 ){
+ p->i = p->iEnd = 0;
+ return SQLITE_OK;
}
- jsonEachCursorReset(p);
- return rc;
- }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
- jsonEachCursorReset(p);
- return SQLITE_NOMEM;
- }else{
- JsonNode *pNode = 0;
- if( idxNum==3 ){
- const char *zErr = 0;
- zRoot = (const char*)sqlite3_value_text(argv[1]);
- if( zRoot==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[1]);
- p->zRoot = sqlite3_malloc64( n+1 );
- if( p->zRoot==0 ) return SQLITE_NOMEM;
- memcpy(p->zRoot, zRoot, (size_t)n+1);
- if( zRoot[0]!='$' ){
- zErr = zRoot;
- }else{
- pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
+ if( jsonConvertTextToBlob(&p->sParse, 0) ){
+ if( p->sParse.oom ){
+ return SQLITE_NOMEM;
}
- if( zErr ){
+ goto json_each_malformed_input;
+ }
+ }
+ if( idxNum==3 ){
+ zRoot = (const char*)sqlite3_value_text(argv[1]);
+ if( zRoot==0 ) return SQLITE_OK;
+ if( zRoot[0]!='$' ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot);
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
+ }
+ p->nRoot = sqlite3Strlen30(zRoot);
+ if( zRoot[1]==0 ){
+ i = p->i = 0;
+ p->eType = 0;
+ }else{
+ i = jsonLookupStep(&p->sParse, 0, zRoot+1, 0);
+ if( JSON_LOOKUP_ISERROR(i) ){
+ if( i==JSON_LOOKUP_NOTFOUND ){
+ p->i = 0;
+ p->eType = 0;
+ p->iEnd = 0;
+ return SQLITE_OK;
+ }
sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
+ cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot);
jsonEachCursorReset(p);
return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
- }else if( pNode==0 ){
- return SQLITE_OK;
}
- }else{
- pNode = p->sParse.aNode;
- }
- p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
- p->eType = pNode->eType;
- if( p->eType>=JSON_ARRAY ){
- assert( pNode->eU==0 );
- VVA( pNode->eU = 3 );
- pNode->u.iKey = 0;
- p->iEnd = p->i + pNode->n + 1;
- if( p->bRecursive ){
- p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
- if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
- p->i--;
- }
+ if( p->sParse.iLabel ){
+ p->i = p->sParse.iLabel;
+ p->eType = JSONB_OBJECT;
}else{
- p->i++;
- }
- }else{
- p->iEnd = p->i+1;
- }
+ p->i = i;
+ p->eType = JSONB_ARRAY;
+ }
+ }
+ jsonAppendRaw(&p->path, zRoot, p->nRoot);
+ }else{
+ i = p->i = 0;
+ p->eType = 0;
+ p->nRoot = 1;
+ jsonAppendRaw(&p->path, "$", 1);
+ }
+ p->nParent = 0;
+ n = jsonbPayloadSize(&p->sParse, i, &sz);
+ p->iEnd = i+n+sz;
+ if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY && !p->bRecursive ){
+ p->i = i + n;
+ p->eType = p->sParse.aBlob[i] & 0x0f;
+ p->aParent = sqlite3DbMallocZero(p->db, sizeof(JsonParent));
+ if( p->aParent==0 ) return SQLITE_NOMEM;
+ p->nParent = 1;
+ p->nParentAlloc = 1;
+ p->aParent[0].iKey = 0;
+ p->aParent[0].iEnd = p->iEnd;
+ p->aParent[0].iHead = p->i;
+ p->aParent[0].iValue = i;
}
return SQLITE_OK;
+
+json_each_malformed_input:
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
}
/* The methods of the json_each virtual table */
@@ -203010,7 +208345,8 @@ static sqlite3_module jsonEachModule = {
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
/* The methods of the json_tree virtual table. */
@@ -203038,7 +208374,8 @@ static sqlite3_module jsonTreeModule = {
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
#endif /* SQLITE_OMIT_VIRTUALTABLE */
#endif /* !defined(SQLITE_OMIT_JSON) */
@@ -203049,34 +208386,57 @@ static sqlite3_module jsonTreeModule = {
SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){
#ifndef SQLITE_OMIT_JSON
static FuncDef aJsonFunc[] = {
- JFUNCTION(json, 1, 0, jsonRemoveFunc),
- JFUNCTION(json_array, -1, 0, jsonArrayFunc),
- JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc),
- JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc),
- JFUNCTION(json_error_position,1, 0, jsonErrorFunc),
- JFUNCTION(json_extract, -1, 0, jsonExtractFunc),
- JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc),
- JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc),
- JFUNCTION(json_insert, -1, 0, jsonSetFunc),
- JFUNCTION(json_object, -1, 0, jsonObjectFunc),
- JFUNCTION(json_patch, 2, 0, jsonPatchFunc),
- JFUNCTION(json_quote, 1, 0, jsonQuoteFunc),
- JFUNCTION(json_remove, -1, 0, jsonRemoveFunc),
- JFUNCTION(json_replace, -1, 0, jsonReplaceFunc),
- JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc),
- JFUNCTION(json_type, 1, 0, jsonTypeFunc),
- JFUNCTION(json_type, 2, 0, jsonTypeFunc),
- JFUNCTION(json_valid, 1, 0, jsonValidFunc),
+ /* sqlite3_result_subtype() ----, ,--- sqlite3_value_subtype() */
+ /* | | */
+ /* Uses cache ------, | | ,---- Returns JSONB */
+ /* | | | | */
+ /* Number of arguments ---, | | | | ,--- Flags */
+ /* | | | | | | */
+ JFUNCTION(json, 1,1,1, 0,0,0, jsonRemoveFunc),
+ JFUNCTION(jsonb, 1,1,0, 0,1,0, jsonRemoveFunc),
+ JFUNCTION(json_array, -1,0,1, 1,0,0, jsonArrayFunc),
+ JFUNCTION(jsonb_array, -1,0,1, 1,1,0, jsonArrayFunc),
+ JFUNCTION(json_array_length, 1,1,0, 0,0,0, jsonArrayLengthFunc),
+ JFUNCTION(json_array_length, 2,1,0, 0,0,0, jsonArrayLengthFunc),
+ JFUNCTION(json_error_position,1,1,0, 0,0,0, jsonErrorFunc),
+ JFUNCTION(json_extract, -1,1,1, 0,0,0, jsonExtractFunc),
+ JFUNCTION(jsonb_extract, -1,1,0, 0,1,0, jsonExtractFunc),
+ JFUNCTION(->, 2,1,1, 0,0,JSON_JSON, jsonExtractFunc),
+ JFUNCTION(->>, 2,1,0, 0,0,JSON_SQL, jsonExtractFunc),
+ JFUNCTION(json_insert, -1,1,1, 1,0,0, jsonSetFunc),
+ JFUNCTION(jsonb_insert, -1,1,0, 1,1,0, jsonSetFunc),
+ JFUNCTION(json_object, -1,0,1, 1,0,0, jsonObjectFunc),
+ JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc),
+ JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc),
+ JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc),
+ JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc),
+ JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc),
+ JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc),
+ JFUNCTION(json_replace, -1,1,1, 1,0,0, jsonReplaceFunc),
+ JFUNCTION(jsonb_replace, -1,1,0, 1,1,0, jsonReplaceFunc),
+ JFUNCTION(json_set, -1,1,1, 1,0,JSON_ISSET, jsonSetFunc),
+ JFUNCTION(jsonb_set, -1,1,0, 1,1,JSON_ISSET, jsonSetFunc),
+ JFUNCTION(json_type, 1,1,0, 0,0,0, jsonTypeFunc),
+ JFUNCTION(json_type, 2,1,0, 0,0,0, jsonTypeFunc),
+ JFUNCTION(json_valid, 1,1,0, 0,0,0, jsonValidFunc),
+ JFUNCTION(json_valid, 2,1,0, 0,0,0, jsonValidFunc),
#if SQLITE_DEBUG
- JFUNCTION(json_parse, 1, 0, jsonParseFunc),
- JFUNCTION(json_test1, 1, 0, jsonTest1Func),
+ JFUNCTION(json_parse, 1,1,0, 0,0,0, jsonParseFunc),
#endif
WAGGREGATE(json_group_array, 1, 0, 0,
jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
- SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|
+ SQLITE_DETERMINISTIC),
+ WAGGREGATE(jsonb_group_array, 1, JSON_BLOB, 0,
+ jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
WAGGREGATE(json_group_object, 2, 0, 0,
jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
- SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC)
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
+ WAGGREGATE(jsonb_group_object,2, JSON_BLOB, 0,
+ jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
+ SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|
+ SQLITE_DETERMINISTIC)
};
sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc));
#endif
@@ -203203,6 +208563,11 @@ typedef unsigned int u32;
#endif
#endif /* !defined(SQLITE_AMALGAMATION) */
+/* Macro to check for 4-byte alignment. Only used inside of assert() */
+#ifdef SQLITE_DEBUG
+# define FOUR_BYTE_ALIGNED(X) ((((char*)(X) - (char*)0) & 3)==0)
+#endif
+
/* #include <string.h> */
/* #include <stdio.h> */
/* #include <assert.h> */
@@ -203268,6 +208633,7 @@ struct Rtree {
int iDepth; /* Current depth of the r-tree structure */
char *zDb; /* Name of database containing r-tree table */
char *zName; /* Name of r-tree table */
+ char *zNodeName; /* Name of the %_node table */
u32 nBusy; /* Current number of users of this structure */
i64 nRowEst; /* Estimated number of rows in this table */
u32 nCursor; /* Number of open cursors */
@@ -203280,7 +208646,6 @@ struct Rtree {
** headed by the node (leaf nodes have RtreeNode.iNode==0).
*/
RtreeNode *pDeleted;
- int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */
/* Blob I/O on xxx_node */
sqlite3_blob *pNodeBlob;
@@ -203577,15 +208942,20 @@ struct RtreeMatchArg {
** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
** at run-time.
*/
-#ifndef SQLITE_BYTEORDER
-# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */
+# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+# define SQLITE_BYTEORDER 4321
+# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
+# define SQLITE_BYTEORDER 1234
+# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1
+# define SQLITE_BYTEORDER 4321
+# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
-# define SQLITE_BYTEORDER 1234
-# elif defined(sparc) || defined(__ppc__) || \
- defined(__ARMEB__) || defined(__AARCH64EB__)
-# define SQLITE_BYTEORDER 4321
+# define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__)
+# define SQLITE_BYTEORDER 4321
# else
# define SQLITE_BYTEORDER 0
# endif
@@ -203609,7 +208979,7 @@ static int readInt16(u8 *p){
return (p[0]<<8) + p[1];
}
static void readCoord(u8 *p, RtreeCoord *pCoord){
- assert( (((sqlite3_uint64)p)&3)==0 ); /* p is always 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(p) );
#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
pCoord->u = _byteswap_ulong(*(u32*)p);
#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
@@ -203663,7 +209033,7 @@ static void writeInt16(u8 *p, int i){
}
static int writeCoord(u8 *p, RtreeCoord *pCoord){
u32 i;
- assert( (((sqlite3_uint64)p)&3)==0 ); /* p is always 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(p) );
assert( sizeof(RtreeCoord)==4 );
assert( sizeof(u32)==4 );
#if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
@@ -203814,7 +209184,7 @@ static int nodeAcquire(
** increase its reference count and return it.
*/
if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){
- if( pParent && pParent!=pNode->pParent ){
+ if( pParent && ALWAYS(pParent!=pNode->pParent) ){
RTREE_IS_CORRUPT(pRtree);
return SQLITE_CORRUPT_VTAB;
}
@@ -203834,11 +209204,9 @@ static int nodeAcquire(
}
}
if( pRtree->pNodeBlob==0 ){
- char *zTab = sqlite3_mprintf("%s_node", pRtree->zName);
- if( zTab==0 ) return SQLITE_NOMEM;
- rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0,
+ rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, pRtree->zNodeName,
+ "data", iNode, 0,
&pRtree->pNodeBlob);
- sqlite3_free(zTab);
}
if( rc ){
nodeBlobReset(pRtree);
@@ -204391,7 +209759,7 @@ static void rtreeNonleafConstraint(
assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
|| p->op==RTREE_FALSE );
- assert( (((sqlite3_uint64)pCellData)&3)==0 ); /* 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(pCellData) );
switch( p->op ){
case RTREE_TRUE: return; /* Always satisfied */
case RTREE_FALSE: break; /* Never satisfied */
@@ -204444,7 +209812,7 @@ static void rtreeLeafConstraint(
|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
|| p->op==RTREE_FALSE );
pCellData += 8 + p->iCoord*4;
- assert( (((sqlite3_uint64)pCellData)&3)==0 ); /* 4-byte aligned */
+ assert( FOUR_BYTE_ALIGNED(pCellData) );
RTREE_DECODE_COORD(eInt, pCellData, xN);
switch( p->op ){
case RTREE_TRUE: return; /* Always satisfied */
@@ -205014,7 +210382,20 @@ static int rtreeFilter(
p->pInfo->nCoord = pRtree->nDim2;
p->pInfo->anQueue = pCsr->anQueue;
p->pInfo->mxLevel = pRtree->iDepth + 1;
- }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+ }else if( eType==SQLITE_INTEGER ){
+ sqlite3_int64 iVal = sqlite3_value_int64(argv[ii]);
+#ifdef SQLITE_RTREE_INT_ONLY
+ p->u.rValue = iVal;
+#else
+ p->u.rValue = (double)iVal;
+ if( iVal>=((sqlite3_int64)1)<<48
+ || iVal<=-(((sqlite3_int64)1)<<48)
+ ){
+ if( p->op==RTREE_LT ) p->op = RTREE_LE;
+ if( p->op==RTREE_GT ) p->op = RTREE_GE;
+ }
+#endif
+ }else if( eType==SQLITE_FLOAT ){
#ifdef SQLITE_RTREE_INT_ONLY
p->u.rValue = sqlite3_value_int64(argv[ii]);
#else
@@ -205145,11 +210526,12 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
|| p->op==SQLITE_INDEX_CONSTRAINT_MATCH)
){
u8 op;
+ u8 doOmit = 1;
switch( p->op ){
- case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break;
- case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break;
+ case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; doOmit = 0; break;
+ case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; doOmit = 0; break;
case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break;
- case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break;
+ case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; doOmit = 0; break;
case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break;
case SQLITE_INDEX_CONSTRAINT_MATCH: op = RTREE_MATCH; break;
default: op = 0; break;
@@ -205158,15 +210540,19 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
zIdxStr[iIdx++] = op;
zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0');
pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
- pIdxInfo->aConstraintUsage[ii].omit = 1;
+ pIdxInfo->aConstraintUsage[ii].omit = doOmit;
}
}
}
pIdxInfo->idxNum = 2;
pIdxInfo->needToFreeIdxStr = 1;
- if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){
- return SQLITE_NOMEM;
+ if( iIdx>0 ){
+ pIdxInfo->idxStr = sqlite3_malloc( iIdx+1 );
+ if( pIdxInfo->idxStr==0 ){
+ return SQLITE_NOMEM;
+ }
+ memcpy(pIdxInfo->idxStr, zIdxStr, iIdx+1);
}
nRow = pRtree->nRowEst >> (iIdx/2);
@@ -205245,31 +210631,22 @@ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
*/
static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
int ii;
- int isInt = (pRtree->eCoordType==RTREE_COORD_INT32);
- for(ii=0; ii<pRtree->nDim2; ii+=2){
- RtreeCoord *a1 = &p1->aCoord[ii];
- RtreeCoord *a2 = &p2->aCoord[ii];
- if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f))
- || ( isInt && (a2[0].i<a1[0].i || a2[1].i>a1[1].i))
- ){
- return 0;
+ if( pRtree->eCoordType==RTREE_COORD_INT32 ){
+ for(ii=0; ii<pRtree->nDim2; ii+=2){
+ RtreeCoord *a1 = &p1->aCoord[ii];
+ RtreeCoord *a2 = &p2->aCoord[ii];
+ if( a2[0].i<a1[0].i || a2[1].i>a1[1].i ) return 0;
+ }
+ }else{
+ for(ii=0; ii<pRtree->nDim2; ii+=2){
+ RtreeCoord *a1 = &p1->aCoord[ii];
+ RtreeCoord *a2 = &p2->aCoord[ii];
+ if( a2[0].f<a1[0].f || a2[1].f>a1[1].f ) return 0;
}
}
return 1;
}
-/*
-** Return the amount cell p would grow by if it were unioned with pCell.
-*/
-static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){
- RtreeDValue area;
- RtreeCell cell;
- memcpy(&cell, p, sizeof(RtreeCell));
- area = cellArea(pRtree, &cell);
- cellUnion(pRtree, &cell, pCell);
- return (cellArea(pRtree, &cell)-area);
-}
-
static RtreeDValue cellOverlap(
Rtree *pRtree,
RtreeCell *p,
@@ -205316,38 +210693,52 @@ static int ChooseLeaf(
for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){
int iCell;
sqlite3_int64 iBest = 0;
-
+ int bFound = 0;
RtreeDValue fMinGrowth = RTREE_ZERO;
RtreeDValue fMinArea = RTREE_ZERO;
-
int nCell = NCELL(pNode);
- RtreeCell cell;
RtreeNode *pChild = 0;
- RtreeCell *aCell = 0;
-
- /* Select the child node which will be enlarged the least if pCell
- ** is inserted into it. Resolve ties by choosing the entry with
- ** the smallest area.
+ /* First check to see if there is are any cells in pNode that completely
+ ** contains pCell. If two or more cells in pNode completely contain pCell
+ ** then pick the smallest.
*/
for(iCell=0; iCell<nCell; iCell++){
- int bBest = 0;
- RtreeDValue growth;
- RtreeDValue area;
+ RtreeCell cell;
nodeGetCell(pRtree, pNode, iCell, &cell);
- growth = cellGrowth(pRtree, &cell, pCell);
- area = cellArea(pRtree, &cell);
- if( iCell==0||growth<fMinGrowth||(growth==fMinGrowth && area<fMinArea) ){
- bBest = 1;
+ if( cellContains(pRtree, &cell, pCell) ){
+ RtreeDValue area = cellArea(pRtree, &cell);
+ if( bFound==0 || area<fMinArea ){
+ iBest = cell.iRowid;
+ fMinArea = area;
+ bFound = 1;
+ }
}
- if( bBest ){
- fMinGrowth = growth;
- fMinArea = area;
- iBest = cell.iRowid;
+ }
+ if( !bFound ){
+ /* No cells of pNode will completely contain pCell. So pick the
+ ** cell of pNode that grows by the least amount when pCell is added.
+ ** Break ties by selecting the smaller cell.
+ */
+ for(iCell=0; iCell<nCell; iCell++){
+ RtreeCell cell;
+ RtreeDValue growth;
+ RtreeDValue area;
+ nodeGetCell(pRtree, pNode, iCell, &cell);
+ area = cellArea(pRtree, &cell);
+ cellUnion(pRtree, &cell, pCell);
+ growth = cellArea(pRtree, &cell)-area;
+ if( iCell==0
+ || growth<fMinGrowth
+ || (growth==fMinGrowth && area<fMinArea)
+ ){
+ fMinGrowth = growth;
+ fMinArea = area;
+ iBest = cell.iRowid;
+ }
}
}
- sqlite3_free(aCell);
rc = nodeAcquire(pRtree, iBest, pNode, &pChild);
nodeRelease(pRtree, pNode);
pNode = pChild;
@@ -205420,77 +210811,6 @@ static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){
static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int);
-/*
-** Arguments aIdx, aDistance and aSpare all point to arrays of size
-** nIdx. The aIdx array contains the set of integers from 0 to
-** (nIdx-1) in no particular order. This function sorts the values
-** in aIdx according to the indexed values in aDistance. For
-** example, assuming the inputs:
-**
-** aIdx = { 0, 1, 2, 3 }
-** aDistance = { 5.0, 2.0, 7.0, 6.0 }
-**
-** this function sets the aIdx array to contain:
-**
-** aIdx = { 0, 1, 2, 3 }
-**
-** The aSpare array is used as temporary working space by the
-** sorting algorithm.
-*/
-static void SortByDistance(
- int *aIdx,
- int nIdx,
- RtreeDValue *aDistance,
- int *aSpare
-){
- if( nIdx>1 ){
- int iLeft = 0;
- int iRight = 0;
-
- int nLeft = nIdx/2;
- int nRight = nIdx-nLeft;
- int *aLeft = aIdx;
- int *aRight = &aIdx[nLeft];
-
- SortByDistance(aLeft, nLeft, aDistance, aSpare);
- SortByDistance(aRight, nRight, aDistance, aSpare);
-
- memcpy(aSpare, aLeft, sizeof(int)*nLeft);
- aLeft = aSpare;
-
- while( iLeft<nLeft || iRight<nRight ){
- if( iLeft==nLeft ){
- aIdx[iLeft+iRight] = aRight[iRight];
- iRight++;
- }else if( iRight==nRight ){
- aIdx[iLeft+iRight] = aLeft[iLeft];
- iLeft++;
- }else{
- RtreeDValue fLeft = aDistance[aLeft[iLeft]];
- RtreeDValue fRight = aDistance[aRight[iRight]];
- if( fLeft<fRight ){
- aIdx[iLeft+iRight] = aLeft[iLeft];
- iLeft++;
- }else{
- aIdx[iLeft+iRight] = aRight[iRight];
- iRight++;
- }
- }
- }
-
-#if 0
- /* Check that the sort worked */
- {
- int jj;
- for(jj=1; jj<nIdx; jj++){
- RtreeDValue left = aDistance[aIdx[jj-1]];
- RtreeDValue right = aDistance[aIdx[jj]];
- assert( left<=right );
- }
- }
-#endif
- }
-}
/*
** Arguments aIdx, aCell and aSpare all point to arrays of size
@@ -205975,107 +211295,6 @@ static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){
return rc;
}
-static int Reinsert(
- Rtree *pRtree,
- RtreeNode *pNode,
- RtreeCell *pCell,
- int iHeight
-){
- int *aOrder;
- int *aSpare;
- RtreeCell *aCell;
- RtreeDValue *aDistance;
- int nCell;
- RtreeDValue aCenterCoord[RTREE_MAX_DIMENSIONS];
- int iDim;
- int ii;
- int rc = SQLITE_OK;
- int n;
-
- memset(aCenterCoord, 0, sizeof(RtreeDValue)*RTREE_MAX_DIMENSIONS);
-
- nCell = NCELL(pNode)+1;
- n = (nCell+1)&(~1);
-
- /* Allocate the buffers used by this operation. The allocation is
- ** relinquished before this function returns.
- */
- aCell = (RtreeCell *)sqlite3_malloc64(n * (
- sizeof(RtreeCell) + /* aCell array */
- sizeof(int) + /* aOrder array */
- sizeof(int) + /* aSpare array */
- sizeof(RtreeDValue) /* aDistance array */
- ));
- if( !aCell ){
- return SQLITE_NOMEM;
- }
- aOrder = (int *)&aCell[n];
- aSpare = (int *)&aOrder[n];
- aDistance = (RtreeDValue *)&aSpare[n];
-
- for(ii=0; ii<nCell; ii++){
- if( ii==(nCell-1) ){
- memcpy(&aCell[ii], pCell, sizeof(RtreeCell));
- }else{
- nodeGetCell(pRtree, pNode, ii, &aCell[ii]);
- }
- aOrder[ii] = ii;
- for(iDim=0; iDim<pRtree->nDim; iDim++){
- aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]);
- aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]);
- }
- }
- for(iDim=0; iDim<pRtree->nDim; iDim++){
- aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2));
- }
-
- for(ii=0; ii<nCell; ii++){
- aDistance[ii] = RTREE_ZERO;
- for(iDim=0; iDim<pRtree->nDim; iDim++){
- RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) -
- DCOORD(aCell[ii].aCoord[iDim*2]));
- aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]);
- }
- }
-
- SortByDistance(aOrder, nCell, aDistance, aSpare);
- nodeZero(pRtree, pNode);
-
- for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){
- RtreeCell *p = &aCell[aOrder[ii]];
- nodeInsertCell(pRtree, pNode, p);
- if( p->iRowid==pCell->iRowid ){
- if( iHeight==0 ){
- rc = rowidWrite(pRtree, p->iRowid, pNode->iNode);
- }else{
- rc = parentWrite(pRtree, p->iRowid, pNode->iNode);
- }
- }
- }
- if( rc==SQLITE_OK ){
- rc = fixBoundingBox(pRtree, pNode);
- }
- for(; rc==SQLITE_OK && ii<nCell; ii++){
- /* Find a node to store this cell in. pNode->iNode currently contains
- ** the height of the sub-tree headed by the cell.
- */
- RtreeNode *pInsert;
- RtreeCell *p = &aCell[aOrder[ii]];
- rc = ChooseLeaf(pRtree, p, iHeight, &pInsert);
- if( rc==SQLITE_OK ){
- int rc2;
- rc = rtreeInsertCell(pRtree, pInsert, p, iHeight);
- rc2 = nodeRelease(pRtree, pInsert);
- if( rc==SQLITE_OK ){
- rc = rc2;
- }
- }
- }
-
- sqlite3_free(aCell);
- return rc;
-}
-
/*
** Insert cell pCell into node pNode. Node pNode is the head of a
** subtree iHeight high (leaf nodes have iHeight==0).
@@ -206096,12 +211315,7 @@ static int rtreeInsertCell(
}
}
if( nodeInsertCell(pRtree, pNode, pCell) ){
- if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){
- rc = SplitNode(pRtree, pNode, pCell, iHeight);
- }else{
- pRtree->iReinsertHeight = iHeight;
- rc = Reinsert(pRtree, pNode, pCell, iHeight);
- }
+ rc = SplitNode(pRtree, pNode, pCell, iHeight);
}else{
rc = AdjustTree(pRtree, pNode, pCell);
if( ALWAYS(rc==SQLITE_OK) ){
@@ -206444,7 +211658,6 @@ static int rtreeUpdate(
}
if( rc==SQLITE_OK ){
int rc2;
- pRtree->iReinsertHeight = -1;
rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
rc2 = nodeRelease(pRtree, pLeaf);
if( rc==SQLITE_OK ){
@@ -206585,8 +211798,11 @@ static int rtreeShadowName(const char *zName){
return 0;
}
+/* Forward declaration */
+static int rtreeIntegrity(sqlite3_vtab*, const char*, const char*, int, char**);
+
static sqlite3_module rtreeModule = {
- 3, /* iVersion */
+ 4, /* iVersion */
rtreeCreate, /* xCreate - create a table */
rtreeConnect, /* xConnect - connect to an existing table */
rtreeBestIndex, /* xBestIndex - Determine search strategy */
@@ -206609,7 +211825,8 @@ static sqlite3_module rtreeModule = {
rtreeSavepoint, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- rtreeShadowName /* xShadowName */
+ rtreeShadowName, /* xShadowName */
+ rtreeIntegrity /* xIntegrity */
};
static int rtreeSqlInit(
@@ -206702,7 +211919,7 @@ static int rtreeSqlInit(
}
sqlite3_free(zSql);
}
- if( pRtree->nAux ){
+ if( pRtree->nAux && rc!=SQLITE_NOMEM ){
pRtree->zReadAuxSql = sqlite3_mprintf(
"SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1",
zDb, zPrefix);
@@ -206865,22 +212082,27 @@ static int rtreeInit(
}
sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+ sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+
/* Allocate the sqlite3_vtab structure */
nDb = (int)strlen(argv[1]);
nName = (int)strlen(argv[2]);
- pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2);
+ pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8);
if( !pRtree ){
return SQLITE_NOMEM;
}
- memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+ memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8);
pRtree->nBusy = 1;
pRtree->base.pModule = &rtreeModule;
pRtree->zDb = (char *)&pRtree[1];
pRtree->zName = &pRtree->zDb[nDb+1];
+ pRtree->zNodeName = &pRtree->zName[nName+1];
pRtree->eCoordType = (u8)eCoordType;
memcpy(pRtree->zDb, argv[1], nDb);
memcpy(pRtree->zName, argv[2], nName);
+ memcpy(pRtree->zNodeName, argv[2], nName);
+ memcpy(&pRtree->zNodeName[nName], "_node", 6);
/* Create/Connect to the underlying relational database schema. If
@@ -207377,7 +212599,6 @@ static int rtreeCheckTable(
){
RtreeCheck check; /* Common context for various routines */
sqlite3_stmt *pStmt = 0; /* Used to find column count of rtree table */
- int bEnd = 0; /* True if transaction should be closed */
int nAux = 0; /* Number of extra columns. */
/* Initialize the context object */
@@ -207386,24 +212607,14 @@ static int rtreeCheckTable(
check.zDb = zDb;
check.zTab = zTab;
- /* If there is not already an open transaction, open one now. This is
- ** to ensure that the queries run as part of this integrity-check operate
- ** on a consistent snapshot. */
- if( sqlite3_get_autocommit(db) ){
- check.rc = sqlite3_exec(db, "BEGIN", 0, 0, 0);
- bEnd = 1;
- }
-
/* Find the number of auxiliary columns */
- if( check.rc==SQLITE_OK ){
- pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
- if( pStmt ){
- nAux = sqlite3_column_count(pStmt) - 2;
- sqlite3_finalize(pStmt);
- }else
- if( check.rc!=SQLITE_NOMEM ){
- check.rc = SQLITE_OK;
- }
+ pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab);
+ if( pStmt ){
+ nAux = sqlite3_column_count(pStmt) - 2;
+ sqlite3_finalize(pStmt);
+ }else
+ if( check.rc!=SQLITE_NOMEM ){
+ check.rc = SQLITE_OK;
}
/* Find number of dimensions in the rtree table. */
@@ -207434,16 +212645,36 @@ static int rtreeCheckTable(
sqlite3_finalize(check.aCheckMapping[0]);
sqlite3_finalize(check.aCheckMapping[1]);
- /* If one was opened, close the transaction */
- if( bEnd ){
- int rc = sqlite3_exec(db, "END", 0, 0, 0);
- if( check.rc==SQLITE_OK ) check.rc = rc;
- }
*pzReport = check.zReport;
return check.rc;
}
/*
+** Implementation of the xIntegrity method for Rtree.
+*/
+static int rtreeIntegrity(
+ sqlite3_vtab *pVtab, /* The virtual table to check */
+ const char *zSchema, /* Schema in which the virtual table lives */
+ const char *zName, /* Name of the virtual table */
+ int isQuick, /* True for a quick_check */
+ char **pzErr /* Write results here */
+){
+ Rtree *pRtree = (Rtree*)pVtab;
+ int rc;
+ assert( pzErr!=0 && *pzErr==0 );
+ UNUSED_PARAMETER(zSchema);
+ UNUSED_PARAMETER(zName);
+ UNUSED_PARAMETER(isQuick);
+ rc = rtreeCheckTable(pRtree->db, pRtree->zDb, pRtree->zName, pzErr);
+ if( rc==SQLITE_OK && *pzErr ){
+ *pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z",
+ pRtree->zDb, pRtree->zName, *pzErr);
+ if( (*pzErr)==0 ) rc = SQLITE_NOMEM;
+ }
+ return rc;
+}
+
+/*
** Usage:
**
** rtreecheck(<rtree-table>);
@@ -208764,24 +213995,28 @@ static int geopolyInit(
(void)pAux;
sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+ sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
/* Allocate the sqlite3_vtab structure */
nDb = strlen(argv[1]);
nName = strlen(argv[2]);
- pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2);
+ pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8);
if( !pRtree ){
return SQLITE_NOMEM;
}
- memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+ memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8);
pRtree->nBusy = 1;
pRtree->base.pModule = &rtreeModule;
pRtree->zDb = (char *)&pRtree[1];
pRtree->zName = &pRtree->zDb[nDb+1];
+ pRtree->zNodeName = &pRtree->zName[nName+1];
pRtree->eCoordType = RTREE_COORD_REAL32;
pRtree->nDim = 2;
pRtree->nDim2 = 4;
memcpy(pRtree->zDb, argv[1], nDb);
memcpy(pRtree->zName, argv[2], nName);
+ memcpy(pRtree->zNodeName, argv[2], nName);
+ memcpy(&pRtree->zNodeName[nName], "_node", 6);
/* Create/Connect to the underlying relational database schema. If
@@ -209195,7 +214430,6 @@ static int geopolyUpdate(
}
if( rc==SQLITE_OK ){
int rc2;
- pRtree->iReinsertHeight = -1;
rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
rc2 = nodeRelease(pRtree, pLeaf);
if( rc==SQLITE_OK ){
@@ -209292,7 +214526,8 @@ static sqlite3_module geopolyModule = {
rtreeSavepoint, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- rtreeShadowName /* xShadowName */
+ rtreeShadowName, /* xShadowName */
+ rtreeIntegrity /* xIntegrity */
};
static int sqlite3_geopoly_init(sqlite3 *db){
@@ -217306,7 +222541,8 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
return sqlite3_create_module(db, "dbstat", &dbstat_module, 0);
}
@@ -217743,7 +222979,8 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){
0, /* xSavepoint */
0, /* xRelease */
0, /* xRollbackTo */
- 0 /* xShadowName */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
};
return sqlite3_create_module(db, "sqlite_dbpage", &dbpage_module, 0);
}
@@ -217874,6 +223111,18 @@ struct sqlite3_changeset_iter {
** The data associated with each hash-table entry is a structure containing
** a subset of the initial values that the modified row contained at the
** start of the session. Or no initial values if the row was inserted.
+**
+** pDfltStmt:
+** This is only used by the sqlite3changegroup_xxx() APIs, not by
+** regular sqlite3_session objects. It is a SELECT statement that
+** selects the default value for each table column. For example,
+** if the table is
+**
+** CREATE TABLE xx(a DEFAULT 1, b, c DEFAULT 'abc')
+**
+** then this variable is the compiled version of:
+**
+** SELECT 1, NULL, 'abc'
*/
struct SessionTable {
SessionTable *pNext;
@@ -217882,10 +223131,12 @@ struct SessionTable {
int bStat1; /* True if this is sqlite_stat1 */
int bRowid; /* True if this table uses rowid for PK */
const char **azCol; /* Column names */
+ const char **azDflt; /* Default value expressions */
u8 *abPK; /* Array of primary key flags */
int nEntry; /* Total number of entries in hash table */
int nChange; /* Size of apChange[] array */
SessionChange **apChange; /* Hash table buckets */
+ sqlite3_stmt *pDfltStmt;
};
/*
@@ -218054,6 +223305,7 @@ struct SessionTable {
struct SessionChange {
u8 op; /* One of UPDATE, DELETE, INSERT */
u8 bIndirect; /* True if this change is "indirect" */
+ u16 nRecordField; /* Number of fields in aRecord[] */
int nMaxSize; /* Max size of eventual changeset record */
int nRecord; /* Number of bytes in buffer aRecord[] */
u8 *aRecord; /* Buffer containing old.* record */
@@ -218079,7 +223331,7 @@ static int sessionVarintLen(int iVal){
** Read a varint value from aBuf[] into *piVal. Return the number of
** bytes read.
*/
-static int sessionVarintGet(u8 *aBuf, int *piVal){
+static int sessionVarintGet(const u8 *aBuf, int *piVal){
return getVarint32(aBuf, *piVal);
}
@@ -218342,9 +223594,11 @@ static int sessionPreupdateHash(
** Return the number of bytes of space occupied by the value (including
** the type byte).
*/
-static int sessionSerialLen(u8 *a){
- int e = *a;
+static int sessionSerialLen(const u8 *a){
+ int e;
int n;
+ assert( a!=0 );
+ e = *a;
if( e==0 || e==0xFF ) return 1;
if( e==SQLITE_NULL ) return 1;
if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9;
@@ -218646,6 +223900,7 @@ static int sessionPreupdateEqual(
rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal);
}
assert( rc==SQLITE_OK );
+ (void)rc; /* Suppress warning about unused variable */
if( sqlite3_value_type(pVal)!=eType ) return 0;
/* A SessionChange object never has a NULL value in a PK column */
@@ -218748,13 +224003,14 @@ static int sessionGrowHash(
**
** For example, if the table is declared as:
**
-** CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z));
+** CREATE TABLE tbl1(w, x DEFAULT 'abc', y, z, PRIMARY KEY(w, z));
**
-** Then the four output variables are populated as follows:
+** Then the five output variables are populated as follows:
**
** *pnCol = 4
** *pzTab = "tbl1"
** *pazCol = {"w", "x", "y", "z"}
+** *pazDflt = {NULL, 'abc', NULL, NULL}
** *pabPK = {1, 0, 0, 1}
**
** All returned buffers are part of the same single allocation, which must
@@ -218768,6 +224024,7 @@ static int sessionTableInfo(
int *pnCol, /* OUT: number of columns */
const char **pzTab, /* OUT: Copy of zThis */
const char ***pazCol, /* OUT: Array of column names for table */
+ const char ***pazDflt, /* OUT: Array of default value expressions */
u8 **pabPK, /* OUT: Array of booleans - true for PK col */
int *pbRowid /* OUT: True if only PK is a rowid */
){
@@ -218780,11 +224037,18 @@ static int sessionTableInfo(
int i;
u8 *pAlloc = 0;
char **azCol = 0;
+ char **azDflt = 0;
u8 *abPK = 0;
int bRowid = 0; /* Set to true to use rowid as PK */
assert( pazCol && pabPK );
+ *pazCol = 0;
+ *pabPK = 0;
+ *pnCol = 0;
+ if( pzTab ) *pzTab = 0;
+ if( pazDflt ) *pazDflt = 0;
+
nThis = sqlite3Strlen30(zThis);
if( nThis==12 && 0==sqlite3_stricmp("sqlite_stat1", zThis) ){
rc = sqlite3_table_column_metadata(db, zDb, zThis, 0, 0, 0, 0, 0, 0);
@@ -218798,39 +224062,28 @@ static int sessionTableInfo(
}else if( rc==SQLITE_ERROR ){
zPragma = sqlite3_mprintf("");
}else{
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
return rc;
}
}else{
zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis);
}
if( !zPragma ){
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
return SQLITE_NOMEM;
}
rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0);
sqlite3_free(zPragma);
if( rc!=SQLITE_OK ){
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
return rc;
}
nByte = nThis + 1;
bRowid = (pbRowid!=0);
while( SQLITE_ROW==sqlite3_step(pStmt) ){
- nByte += sqlite3_column_bytes(pStmt, 1);
+ nByte += sqlite3_column_bytes(pStmt, 1); /* name */
+ nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */
nDbCol++;
- if( sqlite3_column_int(pStmt, 5) ) bRowid = 0;
+ if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */
}
if( nDbCol==0 ) bRowid = 0;
nDbCol += bRowid;
@@ -218838,15 +224091,18 @@ static int sessionTableInfo(
rc = sqlite3_reset(pStmt);
if( rc==SQLITE_OK ){
- nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1);
+ nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1);
pAlloc = sessionMalloc64(pSession, nByte);
if( pAlloc==0 ){
rc = SQLITE_NOMEM;
+ }else{
+ memset(pAlloc, 0, nByte);
}
}
if( rc==SQLITE_OK ){
azCol = (char **)pAlloc;
- pAlloc = (u8 *)&azCol[nDbCol];
+ azDflt = (char**)&azCol[nDbCol];
+ pAlloc = (u8 *)&azDflt[nDbCol];
abPK = (u8 *)pAlloc;
pAlloc = &abPK[nDbCol];
if( pzTab ){
@@ -218866,11 +224122,21 @@ static int sessionTableInfo(
}
while( SQLITE_ROW==sqlite3_step(pStmt) ){
int nName = sqlite3_column_bytes(pStmt, 1);
+ int nDflt = sqlite3_column_bytes(pStmt, 4);
const unsigned char *zName = sqlite3_column_text(pStmt, 1);
+ const unsigned char *zDflt = sqlite3_column_text(pStmt, 4);
+
if( zName==0 ) break;
memcpy(pAlloc, zName, nName+1);
azCol[i] = (char *)pAlloc;
pAlloc += nName+1;
+ if( zDflt ){
+ memcpy(pAlloc, zDflt, nDflt+1);
+ azDflt[i] = (char *)pAlloc;
+ pAlloc += nDflt+1;
+ }else{
+ azDflt[i] = 0;
+ }
abPK[i] = sqlite3_column_int(pStmt, 5);
i++;
}
@@ -218881,14 +224147,11 @@ static int sessionTableInfo(
** free any allocation made. An error code will be returned in this case.
*/
if( rc==SQLITE_OK ){
- *pazCol = (const char **)azCol;
+ *pazCol = (const char**)azCol;
+ if( pazDflt ) *pazDflt = (const char**)azDflt;
*pabPK = abPK;
*pnCol = nDbCol;
}else{
- *pazCol = 0;
- *pabPK = 0;
- *pnCol = 0;
- if( pzTab ) *pzTab = 0;
sessionFree(pSession, azCol);
}
if( pbRowid ) *pbRowid = bRowid;
@@ -218897,10 +224160,9 @@ static int sessionTableInfo(
}
/*
-** This function is only called from within a pre-update handler for a
-** write to table pTab, part of session pSession. If this is the first
-** write to this table, initalize the SessionTable.nCol, azCol[] and
-** abPK[] arrays accordingly.
+** This function is called to initialize the SessionTable.nCol, azCol[]
+** abPK[] and azDflt[] members of SessionTable object pTab. If these
+** fields are already initilialized, this function is a no-op.
**
** If an error occurs, an error code is stored in sqlite3_session.rc and
** non-zero returned. Or, if no error occurs but the table has no primary
@@ -218908,15 +224170,22 @@ static int sessionTableInfo(
** indicate that updates on this table should be ignored. SessionTable.abPK
** is set to NULL in this case.
*/
-static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){
+static int sessionInitTable(
+ sqlite3_session *pSession, /* Optional session handle */
+ SessionTable *pTab, /* Table object to initialize */
+ sqlite3 *db, /* Database handle to read schema from */
+ const char *zDb /* Name of db - "main", "temp" etc. */
+){
+ int rc = SQLITE_OK;
+
if( pTab->nCol==0 ){
u8 *abPK;
assert( pTab->azCol==0 || pTab->abPK==0 );
- pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb,
- pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK,
- (pSession->bImplicitPK ? &pTab->bRowid : 0)
+ rc = sessionTableInfo(pSession, db, zDb,
+ pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK,
+ ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0)
);
- if( pSession->rc==SQLITE_OK ){
+ if( rc==SQLITE_OK ){
int i;
for(i=0; i<pTab->nCol; i++){
if( abPK[i] ){
@@ -218928,14 +224197,321 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){
pTab->bStat1 = 1;
}
- if( pSession->bEnableSize ){
+ if( pSession && pSession->bEnableSize ){
pSession->nMaxChangesetSize += (
1 + sessionVarintLen(pTab->nCol) + pTab->nCol + strlen(pTab->zName)+1
);
}
}
}
- return (pSession->rc || pTab->abPK==0);
+
+ if( pSession ){
+ pSession->rc = rc;
+ return (rc || pTab->abPK==0);
+ }
+ return rc;
+}
+
+/*
+** Re-initialize table object pTab.
+*/
+static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){
+ int nCol = 0;
+ const char **azCol = 0;
+ const char **azDflt = 0;
+ u8 *abPK = 0;
+ int bRowid = 0;
+
+ assert( pSession->rc==SQLITE_OK );
+
+ pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb,
+ pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK,
+ (pSession->bImplicitPK ? &bRowid : 0)
+ );
+ if( pSession->rc==SQLITE_OK ){
+ if( pTab->nCol>nCol || pTab->bRowid!=bRowid ){
+ pSession->rc = SQLITE_SCHEMA;
+ }else{
+ int ii;
+ int nOldCol = pTab->nCol;
+ for(ii=0; ii<nCol; ii++){
+ if( ii<pTab->nCol ){
+ if( pTab->abPK[ii]!=abPK[ii] ){
+ pSession->rc = SQLITE_SCHEMA;
+ }
+ }else if( abPK[ii] ){
+ pSession->rc = SQLITE_SCHEMA;
+ }
+ }
+
+ if( pSession->rc==SQLITE_OK ){
+ const char **a = pTab->azCol;
+ pTab->azCol = azCol;
+ pTab->nCol = nCol;
+ pTab->azDflt = azDflt;
+ pTab->abPK = abPK;
+ azCol = a;
+ }
+ if( pSession->bEnableSize ){
+ pSession->nMaxChangesetSize += (nCol - nOldCol);
+ pSession->nMaxChangesetSize += sessionVarintLen(nCol);
+ pSession->nMaxChangesetSize -= sessionVarintLen(nOldCol);
+ }
+ }
+ }
+
+ sqlite3_free((char*)azCol);
+ return pSession->rc;
+}
+
+/*
+** Session-change object (*pp) contains an old.* record with fewer than
+** nCol fields. This function updates it with the default values for
+** the missing fields.
+*/
+static void sessionUpdateOneChange(
+ sqlite3_session *pSession, /* For memory accounting */
+ int *pRc, /* IN/OUT: Error code */
+ SessionChange **pp, /* IN/OUT: Change object to update */
+ int nCol, /* Number of columns now in table */
+ sqlite3_stmt *pDflt /* SELECT <default-values...> */
+){
+ SessionChange *pOld = *pp;
+
+ while( pOld->nRecordField<nCol ){
+ SessionChange *pNew = 0;
+ int nByte = 0;
+ int nIncr = 0;
+ int iField = pOld->nRecordField;
+ int eType = sqlite3_column_type(pDflt, iField);
+ switch( eType ){
+ case SQLITE_NULL:
+ nIncr = 1;
+ break;
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT:
+ nIncr = 9;
+ break;
+ default: {
+ int n = sqlite3_column_bytes(pDflt, iField);
+ nIncr = 1 + sessionVarintLen(n) + n;
+ assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB );
+ break;
+ }
+ }
+
+ nByte = nIncr + (sizeof(SessionChange) + pOld->nRecord);
+ pNew = sessionMalloc64(pSession, nByte);
+ if( pNew==0 ){
+ *pRc = SQLITE_NOMEM;
+ return;
+ }else{
+ memcpy(pNew, pOld, sizeof(SessionChange));
+ pNew->aRecord = (u8*)&pNew[1];
+ memcpy(pNew->aRecord, pOld->aRecord, pOld->nRecord);
+ pNew->aRecord[pNew->nRecord++] = (u8)eType;
+ switch( eType ){
+ case SQLITE_INTEGER: {
+ i64 iVal = sqlite3_column_int64(pDflt, iField);
+ sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal);
+ pNew->nRecord += 8;
+ break;
+ }
+
+ case SQLITE_FLOAT: {
+ double rVal = sqlite3_column_double(pDflt, iField);
+ i64 iVal = 0;
+ memcpy(&iVal, &rVal, sizeof(rVal));
+ sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal);
+ pNew->nRecord += 8;
+ break;
+ }
+
+ case SQLITE_TEXT: {
+ int n = sqlite3_column_bytes(pDflt, iField);
+ const char *z = (const char*)sqlite3_column_text(pDflt, iField);
+ pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n);
+ memcpy(&pNew->aRecord[pNew->nRecord], z, n);
+ pNew->nRecord += n;
+ break;
+ }
+
+ case SQLITE_BLOB: {
+ int n = sqlite3_column_bytes(pDflt, iField);
+ const u8 *z = (const u8*)sqlite3_column_blob(pDflt, iField);
+ pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n);
+ memcpy(&pNew->aRecord[pNew->nRecord], z, n);
+ pNew->nRecord += n;
+ break;
+ }
+
+ default:
+ assert( eType==SQLITE_NULL );
+ break;
+ }
+
+ sessionFree(pSession, pOld);
+ *pp = pOld = pNew;
+ pNew->nRecordField++;
+ pNew->nMaxSize += nIncr;
+ if( pSession ){
+ pSession->nMaxChangesetSize += nIncr;
+ }
+ }
+ }
+}
+
+/*
+** Ensure that there is room in the buffer to append nByte bytes of data.
+** If not, use sqlite3_realloc() to grow the buffer so that there is.
+**
+** If successful, return zero. Otherwise, if an OOM condition is encountered,
+** set *pRc to SQLITE_NOMEM and return non-zero.
+*/
+static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){
+#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1)
+ i64 nReq = p->nBuf + nByte;
+ if( *pRc==SQLITE_OK && nReq>p->nAlloc ){
+ u8 *aNew;
+ i64 nNew = p->nAlloc ? p->nAlloc : 128;
+
+ do {
+ nNew = nNew*2;
+ }while( nNew<nReq );
+
+ /* The value of SESSION_MAX_BUFFER_SZ is copied from the implementation
+ ** of sqlite3_realloc64(). Allocations greater than this size in bytes
+ ** always fail. It is used here to ensure that this routine can always
+ ** allocate up to this limit - instead of up to the largest power of
+ ** two smaller than the limit. */
+ if( nNew>SESSION_MAX_BUFFER_SZ ){
+ nNew = SESSION_MAX_BUFFER_SZ;
+ if( nNew<nReq ){
+ *pRc = SQLITE_NOMEM;
+ return 1;
+ }
+ }
+
+ aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew);
+ if( 0==aNew ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ p->aBuf = aNew;
+ p->nAlloc = nNew;
+ }
+ }
+ return (*pRc!=SQLITE_OK);
+}
+
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, append a string to the buffer. All bytes in the string
+** up to (but not including) the nul-terminator are written to the buffer.
+**
+** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
+** returning.
+*/
+static void sessionAppendStr(
+ SessionBuffer *p,
+ const char *zStr,
+ int *pRc
+){
+ int nStr = sqlite3Strlen30(zStr);
+ if( 0==sessionBufferGrow(p, nStr+1, pRc) ){
+ memcpy(&p->aBuf[p->nBuf], zStr, nStr);
+ p->nBuf += nStr;
+ p->aBuf[p->nBuf] = 0x00;
+ }
+}
+
+/*
+** Format a string using printf() style formatting and then append it to the
+** buffer using sessionAppendString().
+*/
+static void sessionAppendPrintf(
+ SessionBuffer *p, /* Buffer to append to */
+ int *pRc,
+ const char *zFmt,
+ ...
+){
+ if( *pRc==SQLITE_OK ){
+ char *zApp = 0;
+ va_list ap;
+ va_start(ap, zFmt);
+ zApp = sqlite3_vmprintf(zFmt, ap);
+ if( zApp==0 ){
+ *pRc = SQLITE_NOMEM;
+ }else{
+ sessionAppendStr(p, zApp, pRc);
+ }
+ va_end(ap);
+ sqlite3_free(zApp);
+ }
+}
+
+/*
+** Prepare a statement against database handle db that SELECTs a single
+** row containing the default values for each column in table pTab. For
+** example, if pTab is declared as:
+**
+** CREATE TABLE pTab(a PRIMARY KEY, b DEFAULT 123, c DEFAULT 'abcd');
+**
+** Then this function prepares and returns the SQL statement:
+**
+** SELECT NULL, 123, 'abcd';
+*/
+static int sessionPrepareDfltStmt(
+ sqlite3 *db, /* Database handle */
+ SessionTable *pTab, /* Table to prepare statement for */
+ sqlite3_stmt **ppStmt /* OUT: Statement handle */
+){
+ SessionBuffer sql = {0,0,0};
+ int rc = SQLITE_OK;
+ const char *zSep = " ";
+ int ii = 0;
+
+ *ppStmt = 0;
+ sessionAppendPrintf(&sql, &rc, "SELECT");
+ for(ii=0; ii<pTab->nCol; ii++){
+ const char *zDflt = pTab->azDflt[ii] ? pTab->azDflt[ii] : "NULL";
+ sessionAppendPrintf(&sql, &rc, "%s%s", zSep, zDflt);
+ zSep = ", ";
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_prepare_v2(db, (const char*)sql.aBuf, -1, ppStmt, 0);
+ }
+ sqlite3_free(sql.aBuf);
+
+ return rc;
+}
+
+/*
+** Table pTab has one or more existing change-records with old.* records
+** with fewer than pTab->nCol columns. This function updates all such
+** change-records with the default values for the missing columns.
+*/
+static int sessionUpdateChanges(sqlite3_session *pSession, SessionTable *pTab){
+ sqlite3_stmt *pStmt = 0;
+ int rc = pSession->rc;
+
+ rc = sessionPrepareDfltStmt(pSession->db, pTab, &pStmt);
+ if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+ int ii = 0;
+ SessionChange **pp = 0;
+ for(ii=0; ii<pTab->nChange; ii++){
+ for(pp=&pTab->apChange[ii]; *pp; pp=&((*pp)->pNext)){
+ if( (*pp)->nRecordField!=pTab->nCol ){
+ sessionUpdateOneChange(pSession, &rc, pp, pTab->nCol, pStmt);
+ }
+ }
+ }
+ }
+
+ pSession->rc = rc;
+ rc = sqlite3_finalize(pStmt);
+ if( pSession->rc==SQLITE_OK ) pSession->rc = rc;
+ return pSession->rc;
}
/*
@@ -219098,16 +224674,22 @@ static void sessionPreupdateOneChange(
int iHash;
int bNull = 0;
int rc = SQLITE_OK;
+ int nExpect = 0;
SessionStat1Ctx stat1 = {{0,0,0,0,0},0};
if( pSession->rc ) return;
/* Load table details if required */
- if( sessionInitTable(pSession, pTab) ) return;
+ if( sessionInitTable(pSession, pTab, pSession->db, pSession->zDb) ) return;
/* Check the number of columns in this xPreUpdate call matches the
** number of columns in the table. */
- if( (pTab->nCol-pTab->bRowid)!=pSession->hook.xCount(pSession->hook.pCtx) ){
+ nExpect = pSession->hook.xCount(pSession->hook.pCtx);
+ if( (pTab->nCol-pTab->bRowid)<nExpect ){
+ if( sessionReinitTable(pSession, pTab) ) return;
+ if( sessionUpdateChanges(pSession, pTab) ) return;
+ }
+ if( (pTab->nCol-pTab->bRowid)!=nExpect ){
pSession->rc = SQLITE_SCHEMA;
return;
}
@@ -219184,7 +224766,7 @@ static void sessionPreupdateOneChange(
}
/* Allocate the change object */
- pC = (SessionChange *)sessionMalloc64(pSession, nByte);
+ pC = (SessionChange*)sessionMalloc64(pSession, nByte);
if( !pC ){
rc = SQLITE_NOMEM;
goto error_out;
@@ -219217,6 +224799,7 @@ static void sessionPreupdateOneChange(
if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){
pC->bIndirect = 1;
}
+ pC->nRecordField = pTab->nCol;
pC->nRecord = nByte;
pC->op = op;
pC->pNext = pTab->apChange[iHash];
@@ -219596,7 +225179,7 @@ SQLITE_API int sqlite3session_diff(
/* Locate and if necessary initialize the target table object */
rc = sessionFindTable(pSession, zTbl, &pTo);
if( pTo==0 ) goto diff_out;
- if( sessionInitTable(pSession, pTo) ){
+ if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){
rc = pSession->rc;
goto diff_out;
}
@@ -219609,7 +225192,7 @@ SQLITE_API int sqlite3session_diff(
int bRowid = 0;
u8 *abPK;
const char **azCol = 0;
- rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK,
+ rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK,
pSession->bImplicitPK ? &bRowid : 0
);
if( rc==SQLITE_OK ){
@@ -219724,6 +225307,7 @@ static void sessionDeleteTable(sqlite3_session *pSession, SessionTable *pList){
sessionFree(pSession, p);
}
}
+ sqlite3_finalize(pTab->pDfltStmt);
sessionFree(pSession, (char*)pTab->azCol); /* cast works around VC++ bug */
sessionFree(pSession, pTab->apChange);
sessionFree(pSession, pTab);
@@ -219756,9 +225340,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){
** associated hash-tables. */
sessionDeleteTable(pSession, pSession->pTable);
- /* Assert that all allocations have been freed and then free the
- ** session object itself. */
- assert( pSession->nMalloc==0 );
+ /* Free the session object. */
sqlite3_free(pSession);
}
@@ -219830,48 +225412,6 @@ SQLITE_API int sqlite3session_attach(
}
/*
-** Ensure that there is room in the buffer to append nByte bytes of data.
-** If not, use sqlite3_realloc() to grow the buffer so that there is.
-**
-** If successful, return zero. Otherwise, if an OOM condition is encountered,
-** set *pRc to SQLITE_NOMEM and return non-zero.
-*/
-static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){
-#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1)
- i64 nReq = p->nBuf + nByte;
- if( *pRc==SQLITE_OK && nReq>p->nAlloc ){
- u8 *aNew;
- i64 nNew = p->nAlloc ? p->nAlloc : 128;
-
- do {
- nNew = nNew*2;
- }while( nNew<nReq );
-
- /* The value of SESSION_MAX_BUFFER_SZ is copied from the implementation
- ** of sqlite3_realloc64(). Allocations greater than this size in bytes
- ** always fail. It is used here to ensure that this routine can always
- ** allocate up to this limit - instead of up to the largest power of
- ** two smaller than the limit. */
- if( nNew>SESSION_MAX_BUFFER_SZ ){
- nNew = SESSION_MAX_BUFFER_SZ;
- if( nNew<nReq ){
- *pRc = SQLITE_NOMEM;
- return 1;
- }
- }
-
- aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew);
- if( 0==aNew ){
- *pRc = SQLITE_NOMEM;
- }else{
- p->aBuf = aNew;
- p->nAlloc = nNew;
- }
- }
- return (*pRc!=SQLITE_OK);
-}
-
-/*
** Append the value passed as the second argument to the buffer passed
** as the first.
**
@@ -219941,27 +225481,6 @@ static void sessionAppendBlob(
/*
** This function is a no-op if *pRc is other than SQLITE_OK when it is
-** called. Otherwise, append a string to the buffer. All bytes in the string
-** up to (but not including) the nul-terminator are written to the buffer.
-**
-** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before
-** returning.
-*/
-static void sessionAppendStr(
- SessionBuffer *p,
- const char *zStr,
- int *pRc
-){
- int nStr = sqlite3Strlen30(zStr);
- if( 0==sessionBufferGrow(p, nStr+1, pRc) ){
- memcpy(&p->aBuf[p->nBuf], zStr, nStr);
- p->nBuf += nStr;
- p->aBuf[p->nBuf] = 0x00;
- }
-}
-
-/*
-** This function is a no-op if *pRc is other than SQLITE_OK when it is
** called. Otherwise, append the string representation of integer iVal
** to the buffer. No nul-terminator is written.
**
@@ -219978,27 +225497,6 @@ static void sessionAppendInteger(
sessionAppendStr(p, aBuf, pRc);
}
-static void sessionAppendPrintf(
- SessionBuffer *p, /* Buffer to append to */
- int *pRc,
- const char *zFmt,
- ...
-){
- if( *pRc==SQLITE_OK ){
- char *zApp = 0;
- va_list ap;
- va_start(ap, zFmt);
- zApp = sqlite3_vmprintf(zFmt, ap);
- if( zApp==0 ){
- *pRc = SQLITE_NOMEM;
- }else{
- sessionAppendStr(p, zApp, pRc);
- }
- va_end(ap);
- sqlite3_free(zApp);
- }
-}
-
/*
** This function is a no-op if *pRc is other than SQLITE_OK when it is
** called. Otherwise, append the string zStr enclosed in quotes (") and
@@ -220489,26 +225987,16 @@ static int sessionGenerateChangeset(
for(pTab=pSession->pTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){
if( pTab->nEntry ){
const char *zName = pTab->zName;
- int nCol = 0; /* Number of columns in table */
- u8 *abPK = 0; /* Primary key array */
- const char **azCol = 0; /* Table columns */
int i; /* Used to iterate through hash buckets */
sqlite3_stmt *pSel = 0; /* SELECT statement to query table pTab */
int nRewind = buf.nBuf; /* Initial size of write buffer */
int nNoop; /* Size of buffer after writing tbl header */
- int bRowid = 0;
+ int nOldCol = pTab->nCol;
/* Check the table schema is still Ok. */
- rc = sessionTableInfo(
- 0, db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK,
- (pSession->bImplicitPK ? &bRowid : 0)
- );
- if( rc==SQLITE_OK && (
- pTab->nCol!=nCol
- || pTab->bRowid!=bRowid
- || memcmp(abPK, pTab->abPK, nCol)
- )){
- rc = SQLITE_SCHEMA;
+ rc = sessionReinitTable(pSession, pTab);
+ if( rc==SQLITE_OK && pTab->nCol!=nOldCol ){
+ rc = sessionUpdateChanges(pSession, pTab);
}
/* Write a table header */
@@ -220516,8 +226004,8 @@ static int sessionGenerateChangeset(
/* Build and compile a statement to execute: */
if( rc==SQLITE_OK ){
- rc = sessionSelectStmt(
- db, 0, pSession->zDb, zName, bRowid, nCol, azCol, abPK, &pSel
+ rc = sessionSelectStmt(db, 0, pSession->zDb,
+ zName, pTab->bRowid, pTab->nCol, pTab->azCol, pTab->abPK, &pSel
);
}
@@ -220526,22 +226014,22 @@ static int sessionGenerateChangeset(
SessionChange *p; /* Used to iterate through changes */
for(p=pTab->apChange[i]; rc==SQLITE_OK && p; p=p->pNext){
- rc = sessionSelectBind(pSel, nCol, abPK, p);
+ rc = sessionSelectBind(pSel, pTab->nCol, pTab->abPK, p);
if( rc!=SQLITE_OK ) continue;
if( sqlite3_step(pSel)==SQLITE_ROW ){
if( p->op==SQLITE_INSERT ){
int iCol;
sessionAppendByte(&buf, SQLITE_INSERT, &rc);
sessionAppendByte(&buf, p->bIndirect, &rc);
- for(iCol=0; iCol<nCol; iCol++){
+ for(iCol=0; iCol<pTab->nCol; iCol++){
sessionAppendCol(&buf, pSel, iCol, &rc);
}
}else{
- assert( abPK!=0 ); /* Because sessionSelectStmt() returned ok */
- rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK);
+ assert( pTab->abPK!=0 );
+ rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, pTab->abPK);
}
}else if( p->op!=SQLITE_INSERT ){
- rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK);
+ rc = sessionAppendDelete(&buf, bPatchset, p, pTab->nCol,pTab->abPK);
}
if( rc==SQLITE_OK ){
rc = sqlite3_reset(pSel);
@@ -220566,7 +226054,6 @@ static int sessionGenerateChangeset(
if( buf.nBuf==nNoop ){
buf.nBuf = nRewind;
}
- sqlite3_free((char*)azCol); /* cast works around VC++ bug */
}
}
@@ -220990,15 +226477,19 @@ static int sessionReadRecord(
}
}
if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
- sqlite3_int64 v = sessionGetI64(aVal);
- if( eType==SQLITE_INTEGER ){
- sqlite3VdbeMemSetInt64(apOut[i], v);
+ if( (pIn->nData-pIn->iNext)<8 ){
+ rc = SQLITE_CORRUPT_BKPT;
}else{
- double d;
- memcpy(&d, &v, 8);
- sqlite3VdbeMemSetDouble(apOut[i], d);
+ sqlite3_int64 v = sessionGetI64(aVal);
+ if( eType==SQLITE_INTEGER ){
+ sqlite3VdbeMemSetInt64(apOut[i], v);
+ }else{
+ double d;
+ memcpy(&d, &v, 8);
+ sqlite3VdbeMemSetDouble(apOut[i], d);
+ }
+ pIn->iNext += 8;
}
- pIn->iNext += 8;
}
}
}
@@ -222691,7 +228182,7 @@ static int sessionChangesetApply(
sqlite3changeset_pk(pIter, &abPK, 0);
rc = sessionTableInfo(0, db, "main", zNew,
- &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK, &sApply.bRowid
+ &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid
);
if( rc!=SQLITE_OK ) break;
for(i=0; i<sApply.nCol; i++){
@@ -222823,11 +228314,24 @@ SQLITE_API int sqlite3changeset_apply_v2(
sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */
int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1);
+ u64 savedFlag = db->flags & SQLITE_FkNoAction;
+
+ if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){
+ db->flags |= ((u64)SQLITE_FkNoAction);
+ db->aDb[0].pSchema->schema_cookie -= 32;
+ }
+
if( rc==SQLITE_OK ){
rc = sessionChangesetApply(
db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags
);
}
+
+ if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){
+ assert( db->flags & SQLITE_FkNoAction );
+ db->flags &= ~((u64)SQLITE_FkNoAction);
+ db->aDb[0].pSchema->schema_cookie -= 32;
+ }
return rc;
}
@@ -222915,6 +228419,9 @@ struct sqlite3_changegroup {
int rc; /* Error code */
int bPatch; /* True to accumulate patchsets */
SessionTable *pList; /* List of tables in current patch */
+
+ sqlite3 *db; /* Configured by changegroup_schema() */
+ char *zDb; /* Configured by changegroup_schema() */
};
/*
@@ -222935,6 +228442,7 @@ static int sessionChangeMerge(
){
SessionChange *pNew = 0;
int rc = SQLITE_OK;
+ assert( aRec!=0 );
if( !pExist ){
pNew = (SessionChange *)sqlite3_malloc64(sizeof(SessionChange) + nRec);
@@ -223101,6 +228609,114 @@ static int sessionChangeMerge(
}
/*
+** Check if a changeset entry with nCol columns and the PK array passed
+** as the final argument to this function is compatible with SessionTable
+** pTab. If so, return 1. Otherwise, if they are incompatible in some way,
+** return 0.
+*/
+static int sessionChangesetCheckCompat(
+ SessionTable *pTab,
+ int nCol,
+ u8 *abPK
+){
+ if( pTab->azCol && nCol<pTab->nCol ){
+ int ii;
+ for(ii=0; ii<pTab->nCol; ii++){
+ u8 bPK = (ii < nCol) ? abPK[ii] : 0;
+ if( pTab->abPK[ii]!=bPK ) return 0;
+ }
+ return 1;
+ }
+ return (pTab->nCol==nCol && 0==memcmp(abPK, pTab->abPK, nCol));
+}
+
+static int sessionChangesetExtendRecord(
+ sqlite3_changegroup *pGrp,
+ SessionTable *pTab,
+ int nCol,
+ int op,
+ const u8 *aRec,
+ int nRec,
+ SessionBuffer *pOut
+){
+ int rc = SQLITE_OK;
+ int ii = 0;
+
+ assert( pTab->azCol );
+ assert( nCol<pTab->nCol );
+
+ pOut->nBuf = 0;
+ if( op==SQLITE_INSERT || (op==SQLITE_DELETE && pGrp->bPatch==0) ){
+ /* Append the missing default column values to the record. */
+ sessionAppendBlob(pOut, aRec, nRec, &rc);
+ if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){
+ rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt);
+ }
+ for(ii=nCol; rc==SQLITE_OK && ii<pTab->nCol; ii++){
+ int eType = sqlite3_column_type(pTab->pDfltStmt, ii);
+ sessionAppendByte(pOut, eType, &rc);
+ switch( eType ){
+ case SQLITE_FLOAT:
+ case SQLITE_INTEGER: {
+ i64 iVal;
+ if( eType==SQLITE_INTEGER ){
+ iVal = sqlite3_column_int64(pTab->pDfltStmt, ii);
+ }else{
+ double rVal = sqlite3_column_int64(pTab->pDfltStmt, ii);
+ memcpy(&iVal, &rVal, sizeof(i64));
+ }
+ if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){
+ sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal);
+ }
+ break;
+ }
+
+ case SQLITE_BLOB:
+ case SQLITE_TEXT: {
+ int n = sqlite3_column_bytes(pTab->pDfltStmt, ii);
+ sessionAppendVarint(pOut, n, &rc);
+ if( eType==SQLITE_TEXT ){
+ const u8 *z = (const u8*)sqlite3_column_text(pTab->pDfltStmt, ii);
+ sessionAppendBlob(pOut, z, n, &rc);
+ }else{
+ const u8 *z = (const u8*)sqlite3_column_blob(pTab->pDfltStmt, ii);
+ sessionAppendBlob(pOut, z, n, &rc);
+ }
+ break;
+ }
+
+ default:
+ assert( eType==SQLITE_NULL );
+ break;
+ }
+ }
+ }else if( op==SQLITE_UPDATE ){
+ /* Append missing "undefined" entries to the old.* record. And, if this
+ ** is an UPDATE, to the new.* record as well. */
+ int iOff = 0;
+ if( pGrp->bPatch==0 ){
+ for(ii=0; ii<nCol; ii++){
+ iOff += sessionSerialLen(&aRec[iOff]);
+ }
+ sessionAppendBlob(pOut, aRec, iOff, &rc);
+ for(ii=0; ii<(pTab->nCol-nCol); ii++){
+ sessionAppendByte(pOut, 0x00, &rc);
+ }
+ }
+
+ sessionAppendBlob(pOut, &aRec[iOff], nRec-iOff, &rc);
+ for(ii=0; ii<(pTab->nCol-nCol); ii++){
+ sessionAppendByte(pOut, 0x00, &rc);
+ }
+ }else{
+ assert( op==SQLITE_DELETE && pGrp->bPatch );
+ sessionAppendBlob(pOut, aRec, nRec, &rc);
+ }
+
+ return rc;
+}
+
+/*
** Add all changes in the changeset traversed by the iterator passed as
** the first argument to the changegroup hash tables.
*/
@@ -223113,6 +228729,7 @@ static int sessionChangesetToHash(
int nRec;
int rc = SQLITE_OK;
SessionTable *pTab = 0;
+ SessionBuffer rec = {0, 0, 0};
while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){
const char *zNew;
@@ -223124,6 +228741,9 @@ static int sessionChangesetToHash(
SessionChange *pExist = 0;
SessionChange **pp;
+ /* Ensure that only changesets, or only patchsets, but not a mixture
+ ** of both, are being combined. It is an error to try to combine a
+ ** changeset and a patchset. */
if( pGrp->pList==0 ){
pGrp->bPatch = pIter->bPatchset;
}else if( pIter->bPatchset!=pGrp->bPatch ){
@@ -223156,18 +228776,38 @@ static int sessionChangesetToHash(
pTab->zName = (char*)&pTab->abPK[nCol];
memcpy(pTab->zName, zNew, nNew+1);
+ if( pGrp->db ){
+ pTab->nCol = 0;
+ rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb);
+ if( rc ){
+ assert( pTab->azCol==0 );
+ sqlite3_free(pTab);
+ break;
+ }
+ }
+
/* The new object must be linked on to the end of the list, not
** simply added to the start of it. This is to ensure that the
** tables within the output of sqlite3changegroup_output() are in
** the right order. */
for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext);
*ppTab = pTab;
- }else if( pTab->nCol!=nCol || memcmp(pTab->abPK, abPK, nCol) ){
+ }
+
+ if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){
rc = SQLITE_SCHEMA;
break;
}
}
+ if( nCol<pTab->nCol ){
+ assert( pGrp->db );
+ rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec);
+ if( rc ) break;
+ aRec = rec.aBuf;
+ nRec = rec.nBuf;
+ }
+
if( sessionGrowHash(0, pIter->bPatchset, pTab) ){
rc = SQLITE_NOMEM;
break;
@@ -223205,6 +228845,7 @@ static int sessionChangesetToHash(
}
}
+ sqlite3_free(rec.aBuf);
if( rc==SQLITE_OK ) rc = pIter->rc;
return rc;
}
@@ -223292,6 +228933,31 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp){
}
/*
+** Provide a database schema to the changegroup object.
+*/
+SQLITE_API int sqlite3changegroup_schema(
+ sqlite3_changegroup *pGrp,
+ sqlite3 *db,
+ const char *zDb
+){
+ int rc = SQLITE_OK;
+
+ if( pGrp->pList || pGrp->db ){
+ /* Cannot add a schema after one or more calls to sqlite3changegroup_add(),
+ ** or after sqlite3changegroup_schema() has already been called. */
+ rc = SQLITE_MISUSE;
+ }else{
+ pGrp->zDb = sqlite3_mprintf("%s", zDb);
+ if( pGrp->zDb==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pGrp->db = db;
+ }
+ }
+ return rc;
+}
+
+/*
** Add the changeset currently stored in buffer pData, size nData bytes,
** to changeset-group p.
*/
@@ -223354,6 +229020,7 @@ SQLITE_API int sqlite3changegroup_output_strm(
*/
SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){
if( pGrp ){
+ sqlite3_free(pGrp->zDb);
sessionDeleteTable(0, pGrp->pList);
sqlite3_free(pGrp);
}
@@ -223886,8 +229553,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@@ -223897,8 +229567,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@@ -223914,12 +229586,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@@ -223945,6 +229618,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
+**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@@ -224059,6 +229736,39 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+ are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
@@ -224096,6 +229806,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
@@ -224290,8 +230007,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
+** provide synonyms when tokenizing document text (method (3)) or query
+** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@@ -224339,7 +230056,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@@ -224348,7 +230065,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
- void **ppContext,
+ void **ppUserData,
fts5_tokenizer *pTokenizer
);
@@ -224356,7 +230073,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);
@@ -224528,6 +230245,10 @@ typedef struct Fts5Config Fts5Config;
** attempt to merge together. A value of 1 sets the object to use the
** compile time default. Zero disables auto-merge altogether.
**
+** bContentlessDelete:
+** True if the contentless_delete option was present in the CREATE
+** VIRTUAL TABLE statement.
+**
** zContent:
**
** zContentRowid:
@@ -224562,9 +230283,11 @@ struct Fts5Config {
int nPrefix; /* Number of prefix indexes */
int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */
int eContent; /* An FTS5_CONTENT value */
+ int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */
char *zContent; /* content table */
char *zContentRowid; /* "content_rowid=" option value */
int bColumnsize; /* "columnsize=" option value (dflt==1) */
+ int bTokendata; /* "tokendata=" option value (dflt==0) */
int eDetail; /* FTS5_DETAIL_XXX value */
char *zContentExprlist;
Fts5Tokenizer *pTok;
@@ -224583,6 +230306,7 @@ struct Fts5Config {
char *zRank; /* Name of rank function */
char *zRankArgs; /* Arguments to rank function */
int bSecureDelete; /* 'secure-delete' */
+ int nDeleteMerge; /* 'deletemerge' */
/* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */
char **pzErrmsg;
@@ -224752,17 +230476,19 @@ struct Fts5IndexIter {
/*
** Values used as part of the flags argument passed to IndexQuery().
*/
-#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
-#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
-#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
-#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
+#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */
+#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */
+#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */
+#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */
/* The following are used internally by the fts5_index.c module. They are
** defined here only to make it easier to avoid clashes with the flags
** above. */
-#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
-#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
-#define FTS5INDEX_QUERY_SKIPHASH 0x0040
+#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010
+#define FTS5INDEX_QUERY_NOOUTPUT 0x0020
+#define FTS5INDEX_QUERY_SKIPHASH 0x0040
+#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080
+#define FTS5INDEX_QUERY_SCANONETERM 0x0100
/*
** Create/destroy an Fts5Index object.
@@ -224831,6 +230557,10 @@ static void *sqlite3Fts5StructureRef(Fts5Index*);
static void sqlite3Fts5StructureRelease(void*);
static int sqlite3Fts5StructureTest(Fts5Index*, void*);
+/*
+** Used by xInstToken():
+*/
+static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*);
/*
** Insert or remove data to or from the index. Each time a document is
@@ -224905,6 +230635,16 @@ static int sqlite3Fts5IndexReset(Fts5Index *p);
static int sqlite3Fts5IndexLoadConfig(Fts5Index *p);
+static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin);
+static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid);
+
+static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*);
+
+/* Used to populate hash tables for xInstToken in detail=none/column mode. */
+static int sqlite3Fts5IndexIterWriteTokendata(
+ Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff
+);
+
/*
** End of interface to code in fts5_index.c.
**************************************************************************/
@@ -224989,6 +230729,11 @@ static int sqlite3Fts5HashWrite(
*/
static void sqlite3Fts5HashClear(Fts5Hash*);
+/*
+** Return true if the hash is empty, false otherwise.
+*/
+static int sqlite3Fts5HashIsEmpty(Fts5Hash*);
+
static int sqlite3Fts5HashQuery(
Fts5Hash*, /* Hash table to query */
int nPre,
@@ -225005,11 +230750,13 @@ static void sqlite3Fts5HashScanNext(Fts5Hash*);
static int sqlite3Fts5HashScanEof(Fts5Hash*);
static void sqlite3Fts5HashScanEntry(Fts5Hash *,
const char **pzTerm, /* OUT: term (nul-terminated) */
+ int *pnTerm, /* OUT: Size of term in bytes */
const u8 **ppDoclist, /* OUT: pointer to doclist */
int *pnDoclist /* OUT: size of doclist in bytes */
);
+
/*
** End of interface to code in fts5_hash.c.
**************************************************************************/
@@ -225130,6 +230877,10 @@ static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**);
static int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *);
+static int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*);
+static int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*);
+static void sqlite3Fts5ExprClearTokens(Fts5Expr*);
+
/*******************************************
** The fts5_expr.c API above this point is used by the other hand-written
** C code in this module. The interfaces below this point are called by
@@ -225253,7 +231004,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*);
#define FTS5_STAR 15
/* This file is automatically generated by Lemon from input grammar
-** source file "fts5parse.y". */
+** source file "fts5parse.y".
+*/
/*
** 2000-05-29
**
@@ -226843,15 +232595,19 @@ static int fts5CInstIterInit(
*/
typedef struct HighlightContext HighlightContext;
struct HighlightContext {
- CInstIter iter; /* Coalesced Instance Iterator */
- int iPos; /* Current token offset in zIn[] */
+ /* Constant parameters to fts5HighlightCb() */
int iRangeStart; /* First token to include */
int iRangeEnd; /* If non-zero, last token to include */
const char *zOpen; /* Opening highlight */
const char *zClose; /* Closing highlight */
const char *zIn; /* Input text */
int nIn; /* Size of input text in bytes */
- int iOff; /* Current offset within zIn[] */
+
+ /* Variables modified by fts5HighlightCb() */
+ CInstIter iter; /* Coalesced Instance Iterator */
+ int iPos; /* Current token offset in zIn[] */
+ int iOff; /* Have copied up to this offset in zIn[] */
+ int bOpen; /* True if highlight is open */
char *zOut; /* Output value */
};
@@ -226884,8 +232640,8 @@ static int fts5HighlightCb(
int tflags, /* Mask of FTS5_TOKEN_* flags */
const char *pToken, /* Buffer containing token */
int nToken, /* Size of token in bytes */
- int iStartOff, /* Start offset of token */
- int iEndOff /* End offset of token */
+ int iStartOff, /* Start byte offset of token */
+ int iEndOff /* End byte offset of token */
){
HighlightContext *p = (HighlightContext*)pContext;
int rc = SQLITE_OK;
@@ -226901,30 +232657,55 @@ static int fts5HighlightCb(
if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff;
}
- if( iPos==p->iter.iStart ){
+ /* If the parenthesis is open, and this token is not part of the current
+ ** phrase, and the starting byte offset of this token is past the point
+ ** that has currently been copied into the output buffer, close the
+ ** parenthesis. */
+ if( p->bOpen
+ && (iPos<=p->iter.iStart || p->iter.iStart<0)
+ && iStartOff>p->iOff
+ ){
+ fts5HighlightAppend(&rc, p, p->zClose, -1);
+ p->bOpen = 0;
+ }
+
+ /* If this is the start of a new phrase, and the highlight is not open:
+ **
+ ** * copy text from the input up to the start of the phrase, and
+ ** * open the highlight.
+ */
+ if( iPos==p->iter.iStart && p->bOpen==0 ){
fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff);
fts5HighlightAppend(&rc, p, p->zOpen, -1);
p->iOff = iStartOff;
+ p->bOpen = 1;
}
if( iPos==p->iter.iEnd ){
- if( p->iRangeEnd>=0 && p->iter.iStart<p->iRangeStart ){
+ if( p->bOpen==0 ){
+ assert( p->iRangeEnd>=0 );
fts5HighlightAppend(&rc, p, p->zOpen, -1);
+ p->bOpen = 1;
}
fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
- fts5HighlightAppend(&rc, p, p->zClose, -1);
p->iOff = iEndOff;
+
if( rc==SQLITE_OK ){
rc = fts5CInstIterNext(&p->iter);
}
}
- if( p->iRangeEnd>=0 && iPos==p->iRangeEnd ){
- fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
- p->iOff = iEndOff;
- if( iPos>=p->iter.iStart && iPos<p->iter.iEnd ){
+ if( iPos==p->iRangeEnd ){
+ if( p->bOpen ){
+ if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ p->iOff = iEndOff;
+ }
fts5HighlightAppend(&rc, p, p->zClose, -1);
+ p->bOpen = 0;
}
+ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
+ p->iOff = iEndOff;
}
return rc;
@@ -226956,8 +232737,10 @@ static void fts5HighlightFunction(
ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
ctx.iRangeEnd = -1;
rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn);
-
- if( ctx.zIn ){
+ if( rc==SQLITE_RANGE ){
+ sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC);
+ rc = SQLITE_OK;
+ }else if( ctx.zIn ){
if( rc==SQLITE_OK ){
rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter);
}
@@ -226965,6 +232748,9 @@ static void fts5HighlightFunction(
if( rc==SQLITE_OK ){
rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
}
+ if( ctx.bOpen ){
+ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1);
+ }
fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
if( rc==SQLITE_OK ){
@@ -227243,6 +233029,9 @@ static void fts5SnippetFunction(
if( rc==SQLITE_OK ){
rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
}
+ if( ctx.bOpen ){
+ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1);
+ }
if( ctx.iRangeEnd>=(nColSize-1) ){
fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);
}else{
@@ -227518,6 +233307,7 @@ static void sqlite3Fts5BufferAppendBlob(
){
if( nData ){
if( fts5BufferGrow(pRc, pBuf, nData) ) return;
+ assert( pBuf->p!=0 );
memcpy(&pBuf->p[pBuf->n], pData, nData);
pBuf->n += nData;
}
@@ -227619,6 +233409,7 @@ static int sqlite3Fts5PoslistNext64(
i64 *piOff /* IN/OUT: Current offset */
){
int i = *pi;
+ assert( a!=0 || i==0 );
if( i>=n ){
/* EOF */
*piOff = -1;
@@ -227626,6 +233417,7 @@ static int sqlite3Fts5PoslistNext64(
}else{
i64 iOff = *piOff;
u32 iVal;
+ assert( a!=0 );
fts5FastGetVarint32(a, i, iVal);
if( iVal<=1 ){
if( iVal==0 ){
@@ -227881,6 +233673,8 @@ static void sqlite3Fts5TermsetFree(Fts5Termset *p){
#define FTS5_DEFAULT_CRISISMERGE 16
#define FTS5_DEFAULT_HASHSIZE (1024*1024)
+#define FTS5_DEFAULT_DELETE_AUTOMERGE 10 /* default 10% */
+
/* Maximum allowed page size */
#define FTS5_MAX_PAGE_SIZE (64*1024)
@@ -228211,6 +234005,16 @@ static int fts5ConfigParseSpecial(
return rc;
}
+ if( sqlite3_strnicmp("contentless_delete", zCmd, nCmd)==0 ){
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
+ *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->bContentlessDelete = (zArg[0]=='1');
+ }
+ return rc;
+ }
+
if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){
if( pConfig->zContentRowid ){
*pzErr = sqlite3_mprintf("multiple content_rowid=... directives");
@@ -228245,6 +234049,16 @@ static int fts5ConfigParseSpecial(
return rc;
}
+ if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){
+ *pzErr = sqlite3_mprintf("malformed tokendata=... directive");
+ rc = SQLITE_ERROR;
+ }else{
+ pConfig->bTokendata = (zArg[0]=='1');
+ }
+ return rc;
+ }
+
*pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd);
return SQLITE_ERROR;
}
@@ -228455,6 +234269,28 @@ static int sqlite3Fts5ConfigParse(
sqlite3_free(zTwo);
}
+ /* We only allow contentless_delete=1 if the table is indeed contentless. */
+ if( rc==SQLITE_OK
+ && pRet->bContentlessDelete
+ && pRet->eContent!=FTS5_CONTENT_NONE
+ ){
+ *pzErr = sqlite3_mprintf(
+ "contentless_delete=1 requires a contentless table"
+ );
+ rc = SQLITE_ERROR;
+ }
+
+ /* We only allow contentless_delete=1 if columnsize=0 is not present.
+ **
+ ** This restriction may be removed at some point.
+ */
+ if( rc==SQLITE_OK && pRet->bContentlessDelete && pRet->bColumnsize==0 ){
+ *pzErr = sqlite3_mprintf(
+ "contentless_delete=1 is incompatible with columnsize=0"
+ );
+ rc = SQLITE_ERROR;
+ }
+
/* If a tokenizer= option was successfully parsed, the tokenizer has
** already been allocated. Otherwise, allocate an instance of the default
** tokenizer (unicode61) now. */
@@ -228749,6 +234585,18 @@ static int sqlite3Fts5ConfigSetValue(
}
}
+ else if( 0==sqlite3_stricmp(zKey, "deletemerge") ){
+ int nVal = -1;
+ if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
+ nVal = sqlite3_value_int(pVal);
+ }else{
+ *pbBadkey = 1;
+ }
+ if( nVal<0 ) nVal = FTS5_DEFAULT_DELETE_AUTOMERGE;
+ if( nVal>100 ) nVal = 0;
+ pConfig->nDeleteMerge = nVal;
+ }
+
else if( 0==sqlite3_stricmp(zKey, "rank") ){
const char *zIn = (const char*)sqlite3_value_text(pVal);
char *zRank;
@@ -228797,6 +234645,7 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){
pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE;
pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE;
+ pConfig->nDeleteMerge = FTS5_DEFAULT_DELETE_AUTOMERGE;
zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);
if( zSql ){
@@ -228943,7 +234792,9 @@ struct Fts5ExprNode {
struct Fts5ExprTerm {
u8 bPrefix; /* True for a prefix term */
u8 bFirst; /* True if token must be first in column */
- char *zTerm; /* nul-terminated term */
+ char *pTerm; /* Term data */
+ int nQueryTerm; /* Effective size of term in bytes */
+ int nFullTerm; /* Size of term in bytes incl. tokendata */
Fts5IndexIter *pIter; /* Iterator for this term */
Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */
};
@@ -229810,7 +235661,7 @@ static int fts5ExprNearInitAll(
p->pIter = 0;
}
rc = sqlite3Fts5IndexQuery(
- pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm),
+ pExpr->pIndex, p->pTerm, p->nQueryTerm,
(pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
(pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
pNear->pColset,
@@ -230447,7 +236298,7 @@ static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){
Fts5ExprTerm *pSyn;
Fts5ExprTerm *pNext;
Fts5ExprTerm *pTerm = &pPhrase->aTerm[i];
- sqlite3_free(pTerm->zTerm);
+ sqlite3_free(pTerm->pTerm);
sqlite3Fts5IterClose(pTerm->pIter);
for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){
pNext = pSyn->pSynonym;
@@ -230545,6 +236396,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset(
typedef struct TokenCtx TokenCtx;
struct TokenCtx {
Fts5ExprPhrase *pPhrase;
+ Fts5Config *pConfig;
int rc;
};
@@ -230578,8 +236430,12 @@ static int fts5ParseTokenize(
rc = SQLITE_NOMEM;
}else{
memset(pSyn, 0, (size_t)nByte);
- pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
- memcpy(pSyn->zTerm, pToken, nToken);
+ pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer);
+ pSyn->nFullTerm = pSyn->nQueryTerm = nToken;
+ if( pCtx->pConfig->bTokendata ){
+ pSyn->nQueryTerm = (int)strlen(pSyn->pTerm);
+ }
+ memcpy(pSyn->pTerm, pToken, nToken);
pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym;
pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn;
}
@@ -230604,7 +236460,11 @@ static int fts5ParseTokenize(
if( rc==SQLITE_OK ){
pTerm = &pPhrase->aTerm[pPhrase->nTerm++];
memset(pTerm, 0, sizeof(Fts5ExprTerm));
- pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken);
+ pTerm->nFullTerm = pTerm->nQueryTerm = nToken;
+ if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){
+ pTerm->nQueryTerm = (int)strlen(pTerm->pTerm);
+ }
}
}
@@ -230671,6 +236531,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
memset(&sCtx, 0, sizeof(TokenCtx));
sCtx.pPhrase = pAppend;
+ sCtx.pConfig = pConfig;
rc = fts5ParseStringFromToken(pToken, &z);
if( rc==SQLITE_OK ){
@@ -230718,12 +236579,15 @@ static int sqlite3Fts5ExprClonePhrase(
Fts5Expr **ppNew
){
int rc = SQLITE_OK; /* Return code */
- Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */
+ Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */
Fts5Expr *pNew = 0; /* Expression to return via *ppNew */
- TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */
-
- pOrig = pExpr->apExprPhrase[iPhrase];
- pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ rc = SQLITE_RANGE;
+ }else{
+ pOrig = pExpr->apExprPhrase[iPhrase];
+ pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr));
+ }
if( rc==SQLITE_OK ){
pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc,
sizeof(Fts5ExprPhrase*));
@@ -230736,7 +236600,7 @@ static int sqlite3Fts5ExprClonePhrase(
pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc,
sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*));
}
- if( rc==SQLITE_OK ){
+ if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){
Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset;
if( pColsetOrig ){
sqlite3_int64 nByte;
@@ -230750,26 +236614,27 @@ static int sqlite3Fts5ExprClonePhrase(
}
}
- if( pOrig->nTerm ){
- int i; /* Used to iterate through phrase terms */
- for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
- int tflags = 0;
- Fts5ExprTerm *p;
- for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
- const char *zTerm = p->zTerm;
- rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm),
- 0, 0);
- tflags = FTS5_TOKEN_COLOCATED;
- }
- if( rc==SQLITE_OK ){
- sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
- sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
+ if( rc==SQLITE_OK ){
+ if( pOrig->nTerm ){
+ int i; /* Used to iterate through phrase terms */
+ sCtx.pConfig = pExpr->pConfig;
+ for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
+ int tflags = 0;
+ Fts5ExprTerm *p;
+ for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
+ rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0);
+ tflags = FTS5_TOKEN_COLOCATED;
+ }
+ if( rc==SQLITE_OK ){
+ sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
+ sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst;
+ }
}
+ }else{
+ /* This happens when parsing a token or quoted phrase that contains
+ ** no token characters at all. (e.g ... MATCH '""'). */
+ sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
}
- }else{
- /* This happens when parsing a token or quoted phrase that contains
- ** no token characters at all. (e.g ... MATCH '""'). */
- sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
}
if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){
@@ -231139,11 +237004,13 @@ static Fts5ExprNode *fts5ParsePhraseToAnd(
if( parseGrowPhraseArray(pParse) ){
fts5ExprPhraseFree(pPhrase);
}else{
+ Fts5ExprTerm *p = &pNear->apPhrase[0]->aTerm[ii];
+ Fts5ExprTerm *pTo = &pPhrase->aTerm[0];
pParse->apPhrase[pParse->nPhrase++] = pPhrase;
pPhrase->nTerm = 1;
- pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup(
- &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1
- );
+ pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm);
+ pTo->nQueryTerm = p->nQueryTerm;
+ pTo->nFullTerm = p->nFullTerm;
pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING,
0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase)
);
@@ -231320,7 +237187,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
return pRet;
}
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
sqlite3_int64 nByte = 0;
Fts5ExprTerm *p;
@@ -231328,16 +237195,17 @@ static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
/* Determine the maximum amount of space required. */
for(p=pTerm; p; p=p->pSynonym){
- nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2;
+ nByte += pTerm->nQueryTerm * 2 + 3 + 2;
}
zQuoted = sqlite3_malloc64(nByte);
if( zQuoted ){
int i = 0;
for(p=pTerm; p; p=p->pSynonym){
- char *zIn = p->zTerm;
+ char *zIn = p->pTerm;
+ char *zEnd = &zIn[p->nQueryTerm];
zQuoted[i++] = '"';
- while( *zIn ){
+ while( zIn<zEnd ){
if( *zIn=='"' ) zQuoted[i++] = '"';
zQuoted[i++] = *zIn++;
}
@@ -231415,8 +237283,10 @@ static char *fts5ExprPrintTcl(
zRet = fts5PrintfAppend(zRet, " {");
for(iTerm=0; zRet && iTerm<pPhrase->nTerm; iTerm++){
- char *zTerm = pPhrase->aTerm[iTerm].zTerm;
- zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm);
+ Fts5ExprTerm *p = &pPhrase->aTerm[iTerm];
+ zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ",
+ p->nQueryTerm, p->pTerm
+ );
if( pPhrase->aTerm[iTerm].bPrefix ){
zRet = fts5PrintfAppend(zRet, "*");
}
@@ -231426,6 +237296,8 @@ static char *fts5ExprPrintTcl(
if( zRet==0 ) return 0;
}
+ }else if( pExpr->eType==0 ){
+ zRet = sqlite3_mprintf("{}");
}else{
char const *zOp = 0;
int i;
@@ -231687,14 +237559,14 @@ static void fts5ExprFold(
sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics));
}
}
-#endif /* ifdef SQLITE_TEST */
+#endif /* if SQLITE_TEST || SQLITE_FTS5_DEBUG */
/*
** This is called during initialization to register the fts5_expr() scalar
** UDF with the SQLite handle passed as the only argument.
*/
static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
struct Fts5ExprFunc {
const char *z;
void (*x)(sqlite3_context*,int,sqlite3_value**);
@@ -231815,6 +237687,17 @@ static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){
return 0;
}
+/*
+** pToken is a buffer nToken bytes in size that may or may not contain
+** an embedded 0x00 byte. If it does, return the number of bytes in
+** the buffer before the 0x00. If it does not, return nToken.
+*/
+static int fts5QueryTerm(const char *pToken, int nToken){
+ int ii;
+ for(ii=0; ii<nToken && pToken[ii]; ii++){}
+ return ii;
+}
+
static int fts5ExprPopulatePoslistsCb(
void *pCtx, /* Copy of 2nd argument to xTokenize() */
int tflags, /* Mask of FTS5_TOKEN_* flags */
@@ -231826,22 +237709,33 @@ static int fts5ExprPopulatePoslistsCb(
Fts5ExprCtx *p = (Fts5ExprCtx*)pCtx;
Fts5Expr *pExpr = p->pExpr;
int i;
+ int nQuery = nToken;
+ i64 iRowid = pExpr->pRoot->iRowid;
UNUSED_PARAM2(iUnused1, iUnused2);
- if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE;
+ if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE;
+ if( pExpr->pConfig->bTokendata ){
+ nQuery = fts5QueryTerm(pToken, nQuery);
+ }
if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++;
for(i=0; i<pExpr->nPhrase; i++){
- Fts5ExprTerm *pTerm;
+ Fts5ExprTerm *pT;
if( p->aPopulator[i].bOk==0 ) continue;
- for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){
- int nTerm = (int)strlen(pTerm->zTerm);
- if( (nTerm==nToken || (nTerm<nToken && pTerm->bPrefix))
- && memcmp(pTerm->zTerm, pToken, nTerm)==0
+ for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){
+ if( (pT->nQueryTerm==nQuery || (pT->nQueryTerm<nQuery && pT->bPrefix))
+ && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0
){
int rc = sqlite3Fts5PoslistWriterAppend(
&pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff
);
+ if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){
+ int iCol = p->iOff>>32;
+ int iTokOff = p->iOff & 0x7FFFFFFF;
+ rc = sqlite3Fts5IndexIterWriteTokendata(
+ pT->pIter, pToken, nToken, iRowid, iCol, iTokOff
+ );
+ }
if( rc ) return rc;
break;
}
@@ -231978,6 +237872,83 @@ static int sqlite3Fts5ExprPhraseCollist(
}
/*
+** Does the work of the fts5_api.xQueryToken() API method.
+*/
+static int sqlite3Fts5ExprQueryToken(
+ Fts5Expr *pExpr,
+ int iPhrase,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5ExprPhrase *pPhrase = 0;
+
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ return SQLITE_RANGE;
+ }
+ pPhrase = pExpr->apExprPhrase[iPhrase];
+ if( iToken<0 || iToken>=pPhrase->nTerm ){
+ return SQLITE_RANGE;
+ }
+
+ *ppOut = pPhrase->aTerm[iToken].pTerm;
+ *pnOut = pPhrase->aTerm[iToken].nFullTerm;
+ return SQLITE_OK;
+}
+
+/*
+** Does the work of the fts5_api.xInstToken() API method.
+*/
+static int sqlite3Fts5ExprInstToken(
+ Fts5Expr *pExpr,
+ i64 iRowid,
+ int iPhrase,
+ int iCol,
+ int iOff,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5ExprPhrase *pPhrase = 0;
+ Fts5ExprTerm *pTerm = 0;
+ int rc = SQLITE_OK;
+
+ if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){
+ return SQLITE_RANGE;
+ }
+ pPhrase = pExpr->apExprPhrase[iPhrase];
+ if( iToken<0 || iToken>=pPhrase->nTerm ){
+ return SQLITE_RANGE;
+ }
+ pTerm = &pPhrase->aTerm[iToken];
+ if( pTerm->bPrefix==0 ){
+ if( pExpr->pConfig->bTokendata ){
+ rc = sqlite3Fts5IterToken(
+ pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut
+ );
+ }else{
+ *ppOut = pTerm->pTerm;
+ *pnOut = pTerm->nFullTerm;
+ }
+ }
+ return rc;
+}
+
+/*
+** Clear the token mappings for all Fts5IndexIter objects mannaged by
+** the expression passed as the only argument.
+*/
+static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){
+ int ii;
+ for(ii=0; ii<pExpr->nPhrase; ii++){
+ Fts5ExprTerm *pT;
+ for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){
+ sqlite3Fts5IndexIterClearTokendata(pT->pIter);
+ }
+ }
+}
+
+/*
** 2014 August 11
**
** The author disclaims copyright to this source code. In place of
@@ -232015,10 +237986,15 @@ struct Fts5Hash {
/*
** Each entry in the hash table is represented by an object of the
-** following type. Each object, its key (a nul-terminated string) and
-** its current data are stored in a single memory allocation. The
-** key immediately follows the object in memory. The position list
-** data immediately follows the key data in memory.
+** following type. Each object, its key, and its current data are stored
+** in a single memory allocation. The key immediately follows the object
+** in memory. The position list data immediately follows the key data
+** in memory.
+**
+** The key is Fts5HashEntry.nKey bytes in size. It consists of a single
+** byte identifying the index (either the main term index or a prefix-index),
+** followed by the term data. For example: "0token". There is no
+** nul-terminator - in this case nKey=6.
**
** The data that follows the key is in a similar, but not identical format
** to the doclist data stored in the database. It is:
@@ -232153,8 +238129,7 @@ static int fts5HashResize(Fts5Hash *pHash){
unsigned int iHash;
Fts5HashEntry *p = apOld[i];
apOld[i] = p->pHashNext;
- iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p),
- (int)strlen(fts5EntryKey(p)));
+ iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey);
p->pHashNext = apNew[iHash];
apNew[iHash] = p;
}
@@ -232238,7 +238213,7 @@ static int sqlite3Fts5HashWrite(
for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
char *zKey = fts5EntryKey(p);
if( zKey[0]==bByte
- && p->nKey==nToken
+ && p->nKey==nToken+1
&& memcmp(&zKey[1], pToken, nToken)==0
){
break;
@@ -232268,9 +238243,9 @@ static int sqlite3Fts5HashWrite(
zKey[0] = bByte;
memcpy(&zKey[1], pToken, nToken);
assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) );
- p->nKey = nToken;
+ p->nKey = nToken+1;
zKey[nToken+1] = '\0';
- p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry);
+ p->nData = nToken+1 + sizeof(Fts5HashEntry);
p->pHashNext = pHash->aSlot[iHash];
pHash->aSlot[iHash] = p;
pHash->nEntry++;
@@ -232387,12 +238362,17 @@ static Fts5HashEntry *fts5HashEntryMerge(
*ppOut = p1;
p1 = 0;
}else{
- int i = 0;
char *zKey1 = fts5EntryKey(p1);
char *zKey2 = fts5EntryKey(p2);
- while( zKey1[i]==zKey2[i] ) i++;
+ int nMin = MIN(p1->nKey, p2->nKey);
- if( ((u8)zKey1[i])>((u8)zKey2[i]) ){
+ int cmp = memcmp(zKey1, zKey2, nMin);
+ if( cmp==0 ){
+ cmp = p1->nKey - p2->nKey;
+ }
+ assert( cmp!=0 );
+
+ if( cmp>0 ){
/* p2 is smaller */
*ppOut = p2;
ppOut = &p2->pScanNext;
@@ -232411,10 +238391,8 @@ static Fts5HashEntry *fts5HashEntryMerge(
}
/*
-** Extract all tokens from hash table iHash and link them into a list
-** in sorted order. The hash table is cleared before returning. It is
-** the responsibility of the caller to free the elements of the returned
-** list.
+** Link all tokens from hash table iHash into a list in sorted order. The
+** tokens are not removed from the hash table.
*/
static int fts5HashEntrySort(
Fts5Hash *pHash,
@@ -232436,7 +238414,7 @@ static int fts5HashEntrySort(
Fts5HashEntry *pIter;
for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){
if( pTerm==0
- || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
+ || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm))
){
Fts5HashEntry *pEntry = pIter;
pEntry->pScanNext = 0;
@@ -232454,7 +238432,6 @@ static int fts5HashEntrySort(
pList = fts5HashEntryMerge(pList, ap[i]);
}
- pHash->nEntry = 0;
sqlite3_free(ap);
*ppSorted = pList;
return SQLITE_OK;
@@ -232476,12 +238453,11 @@ static int sqlite3Fts5HashQuery(
for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){
zKey = fts5EntryKey(p);
- assert( p->nKey+1==(int)strlen(zKey) );
- if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break;
+ if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break;
}
if( p ){
- int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1;
+ int nHashPre = sizeof(Fts5HashEntry) + nTerm;
int nList = p->nData - nHashPre;
u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10));
if( pRet ){
@@ -232508,6 +238484,28 @@ static int sqlite3Fts5HashScanInit(
return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan);
}
+#ifdef SQLITE_DEBUG
+static int fts5HashCount(Fts5Hash *pHash){
+ int nEntry = 0;
+ int ii;
+ for(ii=0; ii<pHash->nSlot; ii++){
+ Fts5HashEntry *p = 0;
+ for(p=pHash->aSlot[ii]; p; p=p->pHashNext){
+ nEntry++;
+ }
+ }
+ return nEntry;
+}
+#endif
+
+/*
+** Return true if the hash table is empty, false otherwise.
+*/
+static int sqlite3Fts5HashIsEmpty(Fts5Hash *pHash){
+ assert( pHash->nEntry==fts5HashCount(pHash) );
+ return pHash->nEntry==0;
+}
+
static void sqlite3Fts5HashScanNext(Fts5Hash *p){
assert( !sqlite3Fts5HashScanEof(p) );
p->pScan = p->pScan->pScanNext;
@@ -232520,19 +238518,22 @@ static int sqlite3Fts5HashScanEof(Fts5Hash *p){
static void sqlite3Fts5HashScanEntry(
Fts5Hash *pHash,
const char **pzTerm, /* OUT: term (nul-terminated) */
+ int *pnTerm, /* OUT: Size of term in bytes */
const u8 **ppDoclist, /* OUT: pointer to doclist */
int *pnDoclist /* OUT: size of doclist in bytes */
){
Fts5HashEntry *p;
if( (p = pHash->pScan) ){
char *zKey = fts5EntryKey(p);
- int nTerm = (int)strlen(zKey);
+ int nTerm = p->nKey;
fts5HashAddPoslistSize(pHash, p, 0);
*pzTerm = zKey;
- *ppDoclist = (const u8*)&zKey[nTerm+1];
- *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1);
+ *pnTerm = nTerm;
+ *ppDoclist = (const u8*)&zKey[nTerm];
+ *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm);
}else{
*pzTerm = 0;
+ *pnTerm = 0;
*ppDoclist = 0;
*pnDoclist = 0;
}
@@ -232597,13 +238598,31 @@ static void sqlite3Fts5HashScanEntry(
#define FTS5_MAX_LEVEL 64
/*
+** There are two versions of the format used for the structure record:
+**
+** 1. the legacy format, that may be read by all fts5 versions, and
+**
+** 2. the V2 format, which is used by contentless_delete=1 databases.
+**
+** Both begin with a 4-byte "configuration cookie" value. Then, a legacy
+** format structure record contains a varint - the number of levels in
+** the structure. Whereas a V2 structure record contains the constant
+** 4 bytes [0xff 0x00 0x00 0x01]. This is unambiguous as the value of a
+** varint has to be at least 16256 to begin with "0xFF". And the default
+** maximum number of levels is 64.
+**
+** See below for more on structure record formats.
+*/
+#define FTS5_STRUCTURE_V2 "\xFF\x00\x00\x01"
+
+/*
** Details:
**
** The %_data table managed by this module,
**
** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB);
**
-** , contains the following 5 types of records. See the comments surrounding
+** , contains the following 6 types of records. See the comments surrounding
** the FTS5_*_ROWID macros below for a description of how %_data rowids are
** assigned to each fo them.
**
@@ -232612,12 +238631,12 @@ static void sqlite3Fts5HashScanEntry(
** The set of segments that make up an index - the index structure - are
** recorded in a single record within the %_data table. The record consists
** of a single 32-bit configuration cookie value followed by a list of
-** SQLite varints. If the FTS table features more than one index (because
-** there are one or more prefix indexes), it is guaranteed that all share
-** the same cookie value.
+** SQLite varints.
+**
+** If the structure record is a V2 record, the configuration cookie is
+** followed by the following 4 bytes: [0xFF 0x00 0x00 0x01].
**
-** Immediately following the configuration cookie, the record begins with
-** three varints:
+** Next, the record continues with three varints:
**
** + number of levels,
** + total number of segments on all levels,
@@ -232632,6 +238651,12 @@ static void sqlite3Fts5HashScanEntry(
** + first leaf page number (often 1, always greater than 0)
** + final leaf page number
**
+** Then, for V2 structures only:
+**
+** + lower origin counter value,
+** + upper origin counter value,
+** + the number of tombstone hash pages.
+**
** 2. The Averages Record:
**
** A single record within the %_data table. The data is a list of varints.
@@ -232747,6 +238772,38 @@ static void sqlite3Fts5HashScanEntry(
** * A list of delta-encoded varints - the first rowid on each subsequent
** child page.
**
+** 6. Tombstone Hash Page
+**
+** These records are only ever present in contentless_delete=1 tables.
+** There are zero or more of these associated with each segment. They
+** are used to store the tombstone rowids for rows contained in the
+** associated segments.
+**
+** The set of nHashPg tombstone hash pages associated with a single
+** segment together form a single hash table containing tombstone rowids.
+** To find the page of the hash on which a key might be stored:
+**
+** iPg = (rowid % nHashPg)
+**
+** Then, within page iPg, which has nSlot slots:
+**
+** iSlot = (rowid / nHashPg) % nSlot
+**
+** Each tombstone hash page begins with an 8 byte header:
+**
+** 1-byte: Key-size (the size in bytes of each slot). Either 4 or 8.
+** 1-byte: rowid-0-tombstone flag. This flag is only valid on the
+** first tombstone hash page for each segment (iPg=0). If set,
+** the hash table contains rowid 0. If clear, it does not.
+** Rowid 0 is handled specially.
+** 2-bytes: unused.
+** 4-bytes: Big-endian integer containing number of entries on page.
+**
+** Following this are nSlot 4 or 8 byte slots (depending on the key-size
+** in the first byte of the page header). The number of slots may be
+** determined based on the size of the page record and the key-size:
+**
+** nSlot = (nByte - 8) / key-size
*/
/*
@@ -232780,6 +238837,7 @@ static void sqlite3Fts5HashScanEntry(
#define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno)
#define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno)
+#define FTS5_TOMBSTONE_ROWID(segid,ipg) fts5_dri(segid+(1<<16), 0, 0, ipg)
#ifdef SQLITE_DEBUG
static int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; }
@@ -232806,6 +238864,9 @@ typedef struct Fts5SegWriter Fts5SegWriter;
typedef struct Fts5Structure Fts5Structure;
typedef struct Fts5StructureLevel Fts5StructureLevel;
typedef struct Fts5StructureSegment Fts5StructureSegment;
+typedef struct Fts5TokenDataIter Fts5TokenDataIter;
+typedef struct Fts5TokenDataMap Fts5TokenDataMap;
+typedef struct Fts5TombstoneArray Fts5TombstoneArray;
struct Fts5Data {
u8 *p; /* Pointer to buffer containing record */
@@ -232815,6 +238876,12 @@ struct Fts5Data {
/*
** One object per %_data table.
+**
+** nContentlessDelete:
+** The number of contentless delete operations since the most recent
+** call to fts5IndexFlush() or fts5IndexDiscardData(). This is tracked
+** so that extra auto-merge work can be done by fts5IndexFlush() to
+** account for the delete operations.
*/
struct Fts5Index {
Fts5Config *pConfig; /* Virtual table configuration */
@@ -232829,9 +238896,12 @@ struct Fts5Index {
int nPendingData; /* Current bytes of pending data */
i64 iWriteRowid; /* Rowid for current doc being written */
int bDelete; /* Current write is a delete */
+ int nContentlessDelete; /* Number of contentless delete ops */
+ int nPendingRow; /* Number of INSERT in hash table */
/* Error state. */
int rc; /* Current error code */
+ int flushRc;
/* State used by the fts5DataXXX() functions. */
sqlite3_blob *pReader; /* RO incr-blob open on %_data table */
@@ -232840,6 +238910,7 @@ struct Fts5Index {
sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */
sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */
sqlite3_stmt *pIdxSelect;
+ sqlite3_stmt *pIdxNextSelect;
int nRead; /* Total number of blocks read */
sqlite3_stmt *pDeleteFromIdx;
@@ -232863,11 +238934,23 @@ struct Fts5DoclistIter {
** The contents of the "structure" record for each index are represented
** using an Fts5Structure record in memory. Which uses instances of the
** other Fts5StructureXXX types as components.
+**
+** nOriginCntr:
+** This value is set to non-zero for structure records created for
+** contentlessdelete=1 tables only. In that case it represents the
+** origin value to apply to the next top-level segment created.
*/
struct Fts5StructureSegment {
int iSegid; /* Segment id */
int pgnoFirst; /* First leaf page number in segment */
int pgnoLast; /* Last leaf page number in segment */
+
+ /* contentlessdelete=1 tables only: */
+ u64 iOrigin1;
+ u64 iOrigin2;
+ int nPgTombstone; /* Number of tombstone hash table pages */
+ u64 nEntryTombstone; /* Number of tombstone entries that "count" */
+ u64 nEntry; /* Number of rows in this segment */
};
struct Fts5StructureLevel {
int nMerge; /* Number of segments in incr-merge */
@@ -232877,6 +238960,7 @@ struct Fts5StructureLevel {
struct Fts5Structure {
int nRef; /* Object reference count */
u64 nWriteCounter; /* Total leaves written to level 0 */
+ u64 nOriginCntr; /* Origin value for next top-level segment */
int nSegment; /* Total segments in this structure */
int nLevel; /* Number of levels in this index */
Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */
@@ -232965,6 +239049,13 @@ struct Fts5CResult {
**
** iTermIdx:
** Index of current term on iTermLeafPgno.
+**
+** apTombstone/nTombstone:
+** These are used for contentless_delete=1 tables only. When the cursor
+** is first allocated, the apTombstone[] array is allocated so that it
+** is large enough for all tombstones hash pages associated with the
+** segment. The pages themselves are loaded lazily from the database as
+** they are required.
*/
struct Fts5SegIter {
Fts5StructureSegment *pSeg; /* Segment to iterate through */
@@ -232973,6 +239064,7 @@ struct Fts5SegIter {
Fts5Data *pLeaf; /* Current leaf data */
Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */
i64 iLeafOffset; /* Byte offset within current leaf */
+ Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */
/* Next method */
void (*xNext)(Fts5Index*, Fts5SegIter*, int*);
@@ -233000,6 +239092,15 @@ struct Fts5SegIter {
};
/*
+** Array of tombstone pages. Reference counted.
+*/
+struct Fts5TombstoneArray {
+ int nRef; /* Number of pointers to this object */
+ int nTombstone;
+ Fts5Data *apTombstone[1]; /* Array of tombstone pages */
+};
+
+/*
** Argument is a pointer to an Fts5Data structure that contains a
** leaf page.
*/
@@ -233043,9 +239144,16 @@ struct Fts5SegIter {
** poslist:
** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered.
** There is no way to tell if this is populated or not.
+**
+** pColset:
+** If not NULL, points to an object containing a set of column indices.
+** Only matches that occur in one of these columns will be returned.
+** The Fts5Iter does not own the Fts5Colset object, and so it is not
+** freed when the iterator is closed - it is owned by the upper layer.
*/
struct Fts5Iter {
Fts5IndexIter base; /* Base class containing output vars */
+ Fts5TokenDataIter *pTokenDataIter;
Fts5Index *pIndex; /* Index that owns this iterator */
Fts5Buffer poslist; /* Buffer containing current poslist */
@@ -233063,7 +239171,6 @@ struct Fts5Iter {
Fts5SegIter aSeg[1]; /* Array of segment iterators */
};
-
/*
** An instance of the following type is used to iterate through the contents
** of a doclist-index record.
@@ -233103,6 +239210,60 @@ static u16 fts5GetU16(const u8 *aIn){
}
/*
+** The only argument points to a buffer at least 8 bytes in size. This
+** function interprets the first 8 bytes of the buffer as a 64-bit big-endian
+** unsigned integer and returns the result.
+*/
+static u64 fts5GetU64(u8 *a){
+ return ((u64)a[0] << 56)
+ + ((u64)a[1] << 48)
+ + ((u64)a[2] << 40)
+ + ((u64)a[3] << 32)
+ + ((u64)a[4] << 24)
+ + ((u64)a[5] << 16)
+ + ((u64)a[6] << 8)
+ + ((u64)a[7] << 0);
+}
+
+/*
+** The only argument points to a buffer at least 4 bytes in size. This
+** function interprets the first 4 bytes of the buffer as a 32-bit big-endian
+** unsigned integer and returns the result.
+*/
+static u32 fts5GetU32(const u8 *a){
+ return ((u32)a[0] << 24)
+ + ((u32)a[1] << 16)
+ + ((u32)a[2] << 8)
+ + ((u32)a[3] << 0);
+}
+
+/*
+** Write iVal, formated as a 64-bit big-endian unsigned integer, to the
+** buffer indicated by the first argument.
+*/
+static void fts5PutU64(u8 *a, u64 iVal){
+ a[0] = ((iVal >> 56) & 0xFF);
+ a[1] = ((iVal >> 48) & 0xFF);
+ a[2] = ((iVal >> 40) & 0xFF);
+ a[3] = ((iVal >> 32) & 0xFF);
+ a[4] = ((iVal >> 24) & 0xFF);
+ a[5] = ((iVal >> 16) & 0xFF);
+ a[6] = ((iVal >> 8) & 0xFF);
+ a[7] = ((iVal >> 0) & 0xFF);
+}
+
+/*
+** Write iVal, formated as a 32-bit big-endian unsigned integer, to the
+** buffer indicated by the first argument.
+*/
+static void fts5PutU32(u8 *a, u32 iVal){
+ a[0] = ((iVal >> 24) & 0xFF);
+ a[1] = ((iVal >> 16) & 0xFF);
+ a[2] = ((iVal >> 8) & 0xFF);
+ a[3] = ((iVal >> 0) & 0xFF);
+}
+
+/*
** Allocate and return a buffer at least nByte bytes in size.
**
** If an OOM error is encountered, return NULL and set the error code in
@@ -233329,10 +239490,17 @@ static void fts5DataDelete(Fts5Index *p, i64 iFirst, i64 iLast){
/*
** Remove all records associated with segment iSegid.
*/
-static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){
+static void fts5DataRemoveSegment(Fts5Index *p, Fts5StructureSegment *pSeg){
+ int iSegid = pSeg->iSegid;
i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0);
i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1;
fts5DataDelete(p, iFirst, iLast);
+
+ if( pSeg->nPgTombstone ){
+ i64 iTomb1 = FTS5_TOMBSTONE_ROWID(iSegid, 0);
+ i64 iTomb2 = FTS5_TOMBSTONE_ROWID(iSegid, pSeg->nPgTombstone-1);
+ fts5DataDelete(p, iTomb1, iTomb2);
+ }
if( p->pIdxDeleter==0 ){
Fts5Config *pConfig = p->pConfig;
fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf(
@@ -233443,11 +239611,19 @@ static int fts5StructureDecode(
int nSegment = 0;
sqlite3_int64 nByte; /* Bytes of space to allocate at pRet */
Fts5Structure *pRet = 0; /* Structure object to return */
+ int bStructureV2 = 0; /* True for FTS5_STRUCTURE_V2 */
+ u64 nOriginCntr = 0; /* Largest origin value seen so far */
/* Grab the cookie value */
if( piCookie ) *piCookie = sqlite3Fts5Get32(pData);
i = 4;
+ /* Check if this is a V2 structure record. Set bStructureV2 if it is. */
+ if( 0==memcmp(&pData[i], FTS5_STRUCTURE_V2, 4) ){
+ i += 4;
+ bStructureV2 = 1;
+ }
+
/* Read the total number of levels and segments from the start of the
** structure record. */
i += fts5GetVarint32(&pData[i], nLevel);
@@ -233498,6 +239674,14 @@ static int fts5StructureDecode(
i += fts5GetVarint32(&pData[i], pSeg->iSegid);
i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst);
i += fts5GetVarint32(&pData[i], pSeg->pgnoLast);
+ if( bStructureV2 ){
+ i += fts5GetVarint(&pData[i], &pSeg->iOrigin1);
+ i += fts5GetVarint(&pData[i], &pSeg->iOrigin2);
+ i += fts5GetVarint32(&pData[i], pSeg->nPgTombstone);
+ i += fts5GetVarint(&pData[i], &pSeg->nEntryTombstone);
+ i += fts5GetVarint(&pData[i], &pSeg->nEntry);
+ nOriginCntr = MAX(nOriginCntr, pSeg->iOrigin2);
+ }
if( pSeg->pgnoLast<pSeg->pgnoFirst ){
rc = FTS5_CORRUPT;
break;
@@ -233508,6 +239692,9 @@ static int fts5StructureDecode(
}
}
if( nSegment!=0 && rc==SQLITE_OK ) rc = FTS5_CORRUPT;
+ if( bStructureV2 ){
+ pRet->nOriginCntr = nOriginCntr+1;
+ }
if( rc!=SQLITE_OK ){
fts5StructureRelease(pRet);
@@ -233720,6 +239907,7 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
Fts5Buffer buf; /* Buffer to serialize record into */
int iLvl; /* Used to iterate through levels */
int iCookie; /* Cookie value to store */
+ int nHdr = (pStruct->nOriginCntr>0 ? (4+4+9+9+9) : (4+9+9));
assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
memset(&buf, 0, sizeof(Fts5Buffer));
@@ -233728,9 +239916,12 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
iCookie = p->pConfig->iCookie;
if( iCookie<0 ) iCookie = 0;
- if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, 4+9+9+9) ){
+ if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, nHdr) ){
sqlite3Fts5Put32(buf.p, iCookie);
buf.n = 4;
+ if( pStruct->nOriginCntr>0 ){
+ fts5BufferSafeAppendBlob(&buf, FTS5_STRUCTURE_V2, 4);
+ }
fts5BufferSafeAppendVarint(&buf, pStruct->nLevel);
fts5BufferSafeAppendVarint(&buf, pStruct->nSegment);
fts5BufferSafeAppendVarint(&buf, (i64)pStruct->nWriteCounter);
@@ -233744,9 +239935,17 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){
assert( pLvl->nMerge<=pLvl->nSeg );
for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst);
- fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast);
+ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->iSegid);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoFirst);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoLast);
+ if( pStruct->nOriginCntr>0 ){
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin1);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin2);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->nPgTombstone);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntryTombstone);
+ fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntry);
+ }
}
}
@@ -233889,9 +240088,9 @@ static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){
}
if( iOff<pData->nn ){
- i64 iVal;
+ u64 iVal;
pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1;
- iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal);
+ iOff += fts5GetVarint(&pData->p[iOff], &iVal);
pLvl->iRowid += iVal;
pLvl->iOff = iOff;
}else{
@@ -234270,6 +240469,25 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){
}
/*
+** Allocate a tombstone hash page array object (pIter->pTombArray) for
+** the iterator passed as the second argument. If an OOM error occurs,
+** leave an error in the Fts5Index object.
+*/
+static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){
+ const int nTomb = pIter->pSeg->nPgTombstone;
+ if( nTomb>0 ){
+ int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray);
+ Fts5TombstoneArray *pNew;
+ pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte);
+ if( pNew ){
+ pNew->nTombstone = nTomb;
+ pNew->nRef = 1;
+ pIter->pTombArray = pNew;
+ }
+ }
+}
+
+/*
** Initialize the iterator object pIter to iterate through the entries in
** segment pSeg. The iterator is left pointing to the first entry when
** this function returns.
@@ -234310,6 +240528,7 @@ static void fts5SegIterInit(
pIter->iPgidxOff = pIter->pLeaf->szLeaf+1;
fts5SegIterLoadTerm(p, pIter, 0);
fts5SegIterLoadNPos(p, pIter);
+ fts5SegIterAllocTombstone(p, pIter);
}
}
@@ -234520,15 +240739,16 @@ static void fts5SegIterNext_None(
}else{
const u8 *pList = 0;
const char *zTerm = 0;
+ int nTerm = 0;
int nList;
sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList);
if( pList==0 ) goto next_none_eof;
pIter->pLeaf->p = (u8*)pList;
pIter->pLeaf->nn = nList;
pIter->pLeaf->szLeaf = nList;
pIter->iEndofDoclist = nList;
- sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm);
+ sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm);
pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
}
@@ -234594,11 +240814,12 @@ static void fts5SegIterNext(
}else if( pIter->pSeg==0 ){
const u8 *pList = 0;
const char *zTerm = 0;
+ int nTerm = 0;
int nList = 0;
assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm );
if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){
sqlite3Fts5HashScanNext(p->pHash);
- sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList);
+ sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList);
}
if( pList==0 ){
fts5DataRelease(pIter->pLeaf);
@@ -234608,8 +240829,7 @@ static void fts5SegIterNext(
pIter->pLeaf->nn = nList;
pIter->pLeaf->szLeaf = nList;
pIter->iEndofDoclist = nList+1;
- sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm),
- (u8*)zTerm);
+ sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm);
pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid);
*pbNewTerm = 1;
}
@@ -234995,7 +241215,7 @@ static void fts5SegIterSeekInit(
fts5LeafSeek(p, bGe, pIter, pTerm, nTerm);
}
- if( p->rc==SQLITE_OK && bGe==0 ){
+ if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){
pIter->flags |= FTS5_SEGITER_ONETERM;
if( pIter->pLeaf ){
if( flags & FTS5INDEX_QUERY_DESC ){
@@ -235011,6 +241231,9 @@ static void fts5SegIterSeekInit(
}
fts5SegIterSetNext(p, pIter);
+ if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){
+ fts5SegIterAllocTombstone(p, pIter);
+ }
/* Either:
**
@@ -235027,6 +241250,79 @@ static void fts5SegIterSeekInit(
);
}
+
+/*
+** SQL used by fts5SegIterNextInit() to find the page to open.
+*/
+static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){
+ if( p->pIdxNextSelect==0 ){
+ Fts5Config *pConfig = p->pConfig;
+ fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf(
+ "SELECT pgno FROM '%q'.'%q_idx' WHERE "
+ "segid=? AND term>? ORDER BY term ASC LIMIT 1",
+ pConfig->zDb, pConfig->zName
+ ));
+
+ }
+ return p->pIdxNextSelect;
+}
+
+/*
+** This is similar to fts5SegIterSeekInit(), except that it initializes
+** the segment iterator to point to the first term following the page
+** with pToken/nToken on it.
+*/
+static void fts5SegIterNextInit(
+ Fts5Index *p,
+ const char *pTerm, int nTerm,
+ Fts5StructureSegment *pSeg, /* Description of segment */
+ Fts5SegIter *pIter /* Object to populate */
+){
+ int iPg = -1; /* Page of segment to open */
+ int bDlidx = 0;
+ sqlite3_stmt *pSel = 0; /* SELECT to find iPg */
+
+ pSel = fts5IdxNextStmt(p);
+ if( pSel ){
+ assert( p->rc==SQLITE_OK );
+ sqlite3_bind_int(pSel, 1, pSeg->iSegid);
+ sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC);
+
+ if( sqlite3_step(pSel)==SQLITE_ROW ){
+ i64 val = sqlite3_column_int64(pSel, 0);
+ iPg = (int)(val>>1);
+ bDlidx = (val & 0x0001);
+ }
+ p->rc = sqlite3_reset(pSel);
+ sqlite3_bind_null(pSel, 2);
+ if( p->rc ) return;
+ }
+
+ memset(pIter, 0, sizeof(*pIter));
+ pIter->pSeg = pSeg;
+ pIter->flags |= FTS5_SEGITER_ONETERM;
+ if( iPg>=0 ){
+ pIter->iLeafPgno = iPg - 1;
+ fts5SegIterNextPage(p, pIter);
+ fts5SegIterSetNext(p, pIter);
+ }
+ if( pIter->pLeaf ){
+ const u8 *a = pIter->pLeaf->p;
+ int iTermOff = 0;
+
+ pIter->iPgidxOff = pIter->pLeaf->szLeaf;
+ pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff);
+ pIter->iLeafOffset = iTermOff;
+ fts5SegIterLoadTerm(p, pIter, 0);
+ fts5SegIterLoadNPos(p, pIter);
+ if( bDlidx ) fts5SegIterLoadDlidx(p, pIter);
+
+ assert( p->rc!=SQLITE_OK ||
+ fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0
+ );
+ }
+}
+
/*
** Initialize the object pIter to point to term pTerm/nTerm within the
** in-memory hash table. If there is no such term in the hash-table, the
@@ -235053,14 +241349,21 @@ static void fts5SegIterHashInit(
const u8 *pList = 0;
p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm);
- sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList);
- n = (z ? (int)strlen((const char*)z) : 0);
+ sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList);
if( pList ){
pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data));
if( pLeaf ){
pLeaf->p = (u8*)pList;
}
}
+
+ /* The call to sqlite3Fts5HashScanInit() causes the hash table to
+ ** fill the size field of all existing position lists. This means they
+ ** can no longer be appended to. Since the only scenario in which they
+ ** can be appended to is if the previous operation on this table was
+ ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this
+ ** possibility altogether. */
+ p->bDelete = 0;
}else{
p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data),
(const char*)pTerm, nTerm, (void**)&pLeaf, &nList
@@ -235092,12 +241395,44 @@ static void fts5SegIterHashInit(
}
/*
+** Array ap[] contains n elements. Release each of these elements using
+** fts5DataRelease(). Then free the array itself using sqlite3_free().
+*/
+static void fts5IndexFreeArray(Fts5Data **ap, int n){
+ if( ap ){
+ int ii;
+ for(ii=0; ii<n; ii++){
+ fts5DataRelease(ap[ii]);
+ }
+ sqlite3_free(ap);
+ }
+}
+
+/*
+** Decrement the ref-count of the object passed as the only argument. If it
+** reaches 0, free it and its contents.
+*/
+static void fts5TombstoneArrayDelete(Fts5TombstoneArray *p){
+ if( p ){
+ p->nRef--;
+ if( p->nRef<=0 ){
+ int ii;
+ for(ii=0; ii<p->nTombstone; ii++){
+ fts5DataRelease(p->apTombstone[ii]);
+ }
+ sqlite3_free(p);
+ }
+ }
+}
+
+/*
** Zero the iterator passed as the only argument.
*/
static void fts5SegIterClear(Fts5SegIter *pIter){
fts5BufferFree(&pIter->term);
fts5DataRelease(pIter->pLeaf);
fts5DataRelease(pIter->pNextLeaf);
+ fts5TombstoneArrayDelete(pIter->pTombArray);
fts5DlidxIterFree(pIter->pDlidx);
sqlite3_free(pIter->aRowidOffset);
memset(pIter, 0, sizeof(Fts5SegIter));
@@ -235231,7 +241566,6 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){
assert_nc( i2!=0 );
pRes->bTermEq = 1;
if( p1->iRowid==p2->iRowid ){
- p1->bDel = p2->bDel;
return i2;
}
res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1;
@@ -235343,7 +241677,6 @@ static void fts5SegIterNextFrom(
}while( p->rc==SQLITE_OK );
}
-
/*
** Free the iterator object passed as the second argument.
*/
@@ -235436,6 +241769,85 @@ static void fts5MultiIterSetEof(Fts5Iter *pIter){
}
/*
+** The argument to this macro must be an Fts5Data structure containing a
+** tombstone hash page. This macro returns the key-size of the hash-page.
+*/
+#define TOMBSTONE_KEYSIZE(pPg) (pPg->p[0]==4 ? 4 : 8)
+
+#define TOMBSTONE_NSLOT(pPg) \
+ ((pPg->nn > 16) ? ((pPg->nn-8) / TOMBSTONE_KEYSIZE(pPg)) : 1)
+
+/*
+** Query a single tombstone hash table for rowid iRowid. Return true if
+** it is found or false otherwise. The tombstone hash table is one of
+** nHashTable tables.
+*/
+static int fts5IndexTombstoneQuery(
+ Fts5Data *pHash, /* Hash table page to query */
+ int nHashTable, /* Number of pages attached to segment */
+ u64 iRowid /* Rowid to query hash for */
+){
+ const int szKey = TOMBSTONE_KEYSIZE(pHash);
+ const int nSlot = TOMBSTONE_NSLOT(pHash);
+ int iSlot = (iRowid / nHashTable) % nSlot;
+ int nCollide = nSlot;
+
+ if( iRowid==0 ){
+ return pHash->p[1];
+ }else if( szKey==4 ){
+ u32 *aSlot = (u32*)&pHash->p[8];
+ while( aSlot[iSlot] ){
+ if( fts5GetU32((u8*)&aSlot[iSlot])==iRowid ) return 1;
+ if( nCollide--==0 ) break;
+ iSlot = (iSlot+1)%nSlot;
+ }
+ }else{
+ u64 *aSlot = (u64*)&pHash->p[8];
+ while( aSlot[iSlot] ){
+ if( fts5GetU64((u8*)&aSlot[iSlot])==iRowid ) return 1;
+ if( nCollide--==0 ) break;
+ iSlot = (iSlot+1)%nSlot;
+ }
+ }
+
+ return 0;
+}
+
+/*
+** Return true if the iterator passed as the only argument points
+** to an segment entry for which there is a tombstone. Return false
+** if there is no tombstone or if the iterator is already at EOF.
+*/
+static int fts5MultiIterIsDeleted(Fts5Iter *pIter){
+ int iFirst = pIter->aFirst[1].iFirst;
+ Fts5SegIter *pSeg = &pIter->aSeg[iFirst];
+ Fts5TombstoneArray *pArray = pSeg->pTombArray;
+
+ if( pSeg->pLeaf && pArray ){
+ /* Figure out which page the rowid might be present on. */
+ int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone;
+ assert( iPg>=0 );
+
+ /* If tombstone hash page iPg has not yet been loaded from the
+ ** database, load it now. */
+ if( pArray->apTombstone[iPg]==0 ){
+ pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex,
+ FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg)
+ );
+ if( pArray->apTombstone[iPg]==0 ) return 0;
+ }
+
+ return fts5IndexTombstoneQuery(
+ pArray->apTombstone[iPg],
+ pArray->nTombstone,
+ pSeg->iRowid
+ );
+ }
+
+ return 0;
+}
+
+/*
** Move the iterator to the next entry.
**
** If an error occurs, an error code is left in Fts5Index.rc. It is not
@@ -235472,7 +241884,9 @@ static void fts5MultiIterNext(
fts5AssertMultiIterSetup(p, pIter);
assert( pSeg==&pIter->aSeg[pIter->aFirst[1].iFirst] && pSeg->pLeaf );
- if( pIter->bSkipEmpty==0 || pSeg->nPos ){
+ if( (pIter->bSkipEmpty==0 || pSeg->nPos)
+ && 0==fts5MultiIterIsDeleted(pIter)
+ ){
pIter->xSetOutputs(pIter, pSeg);
return;
}
@@ -235504,7 +241918,9 @@ static void fts5MultiIterNext2(
}
fts5AssertMultiIterSetup(p, pIter);
- }while( fts5MultiIterIsEmpty(p, pIter) );
+ }while( (fts5MultiIterIsEmpty(p, pIter) || fts5MultiIterIsDeleted(pIter))
+ && (p->rc==SQLITE_OK)
+ );
}
}
@@ -235517,7 +241933,7 @@ static Fts5Iter *fts5MultiIterAlloc(
int nSeg
){
Fts5Iter *pNew;
- int nSlot; /* Power of two >= nSeg */
+ i64 nSlot; /* Power of two >= nSeg */
for(nSlot=2; nSlot<nSeg; nSlot=nSlot*2);
pNew = fts5IdxMalloc(p,
@@ -235962,6 +242378,32 @@ static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){
}
}
+/*
+** All the component segment-iterators of pIter have been set up. This
+** functions finishes setup for iterator pIter itself.
+*/
+static void fts5MultiIterFinishSetup(Fts5Index *p, Fts5Iter *pIter){
+ int iIter;
+ for(iIter=pIter->nSeg-1; iIter>0; iIter--){
+ int iEq;
+ if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){
+ Fts5SegIter *pSeg = &pIter->aSeg[iEq];
+ if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
+ fts5MultiIterAdvanced(p, pIter, iEq, iIter);
+ }
+ }
+ fts5MultiIterSetEof(pIter);
+ fts5AssertMultiIterSetup(p, pIter);
+
+ if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter))
+ || fts5MultiIterIsDeleted(pIter)
+ ){
+ fts5MultiIterNext(p, pIter, 0, 0);
+ }else if( pIter->base.bEof==0 ){
+ Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst];
+ pIter->xSetOutputs(pIter, pSeg);
+ }
+}
/*
** Allocate a new Fts5Iter object.
@@ -236043,29 +242485,12 @@ static void fts5MultiIterNew(
assert( iIter==nSeg );
}
- /* If the above was successful, each component iterators now points
+ /* If the above was successful, each component iterator now points
** to the first entry in its segment. In this case initialize the
** aFirst[] array. Or, if an error has occurred, free the iterator
** object and set the output variable to NULL. */
if( p->rc==SQLITE_OK ){
- for(iIter=pNew->nSeg-1; iIter>0; iIter--){
- int iEq;
- if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){
- Fts5SegIter *pSeg = &pNew->aSeg[iEq];
- if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0);
- fts5MultiIterAdvanced(p, pNew, iEq, iIter);
- }
- }
- fts5MultiIterSetEof(pNew);
- fts5AssertMultiIterSetup(p, pNew);
-
- if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){
- fts5MultiIterNext(p, pNew, 0, 0);
- }else if( pNew->base.bEof==0 ){
- Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst];
- pNew->xSetOutputs(pNew, pSeg);
- }
-
+ fts5MultiIterFinishSetup(p, pNew);
}else{
fts5MultiIterFree(pNew);
*ppOut = 0;
@@ -236090,7 +242515,6 @@ static void fts5MultiIterNew2(
pNew = fts5MultiIterAlloc(p, 2);
if( pNew ){
Fts5SegIter *pIter = &pNew->aSeg[1];
-
pIter->flags = FTS5_SEGITER_ONETERM;
if( pData->szLeaf>0 ){
pIter->pLeaf = pData;
@@ -236237,7 +242661,10 @@ static void fts5IndexDiscardData(Fts5Index *p){
if( p->pHash ){
sqlite3Fts5HashClear(p->pHash);
p->nPendingData = 0;
+ p->nPendingRow = 0;
+ p->flushRc = SQLITE_OK;
}
+ p->nContentlessDelete = 0;
}
/*
@@ -236451,7 +242878,7 @@ static void fts5WriteDlidxAppend(
}
if( pDlidx->bPrevValid ){
- iVal = iRowid - pDlidx->iPrev;
+ iVal = (u64)iRowid - (u64)pDlidx->iPrev;
}else{
i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno);
assert( pDlidx->buf.n==0 );
@@ -236638,7 +243065,7 @@ static void fts5WriteAppendPoslistData(
const u8 *a = aData;
int n = nData;
- assert( p->pConfig->pgsz>0 );
+ assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK );
while( p->rc==SQLITE_OK
&& (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz
){
@@ -236874,6 +243301,12 @@ static void fts5IndexMergeLevel(
/* Read input from all segments in the input level */
nInput = pLvl->nSeg;
+
+ /* Set the range of origins that will go into the output segment. */
+ if( pStruct->nOriginCntr>0 ){
+ pSeg->iOrigin1 = pLvl->aSeg[0].iOrigin1;
+ pSeg->iOrigin2 = pLvl->aSeg[pLvl->nSeg-1].iOrigin2;
+ }
}
bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2);
@@ -236933,8 +243366,11 @@ static void fts5IndexMergeLevel(
int i;
/* Remove the redundant segments from the %_data table */
+ assert( pSeg->nEntry==0 );
for(i=0; i<nInput; i++){
- fts5DataRemoveSegment(p, pLvl->aSeg[i].iSegid);
+ Fts5StructureSegment *pOld = &pLvl->aSeg[i];
+ pSeg->nEntry += (pOld->nEntry - pOld->nEntryTombstone);
+ fts5DataRemoveSegment(p, pOld);
}
/* Remove the redundant segments from the input level */
@@ -236961,6 +243397,43 @@ static void fts5IndexMergeLevel(
}
/*
+** If this is not a contentless_delete=1 table, or if the 'deletemerge'
+** configuration option is set to 0, then this function always returns -1.
+** Otherwise, it searches the structure object passed as the second argument
+** for a level suitable for merging due to having a large number of
+** tombstones in the tombstone hash. If one is found, its index is returned.
+** Otherwise, if there is no suitable level, -1.
+*/
+static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){
+ Fts5Config *pConfig = p->pConfig;
+ int iRet = -1;
+ if( pConfig->bContentlessDelete && pConfig->nDeleteMerge>0 ){
+ int ii;
+ int nBest = 0;
+
+ for(ii=0; ii<pStruct->nLevel; ii++){
+ Fts5StructureLevel *pLvl = &pStruct->aLevel[ii];
+ i64 nEntry = 0;
+ i64 nTomb = 0;
+ int iSeg;
+ for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
+ nEntry += pLvl->aSeg[iSeg].nEntry;
+ nTomb += pLvl->aSeg[iSeg].nEntryTombstone;
+ }
+ assert_nc( nEntry>0 || pLvl->nSeg==0 );
+ if( nEntry>0 ){
+ int nPercent = (nTomb * 100) / nEntry;
+ if( nPercent>=pConfig->nDeleteMerge && nPercent>nBest ){
+ iRet = ii;
+ nBest = nPercent;
+ }
+ }
+ }
+ }
+ return iRet;
+}
+
+/*
** Do up to nPg pages of automerge work on the index.
**
** Return true if any changes were actually made, or false otherwise.
@@ -236979,14 +243452,15 @@ static int fts5IndexMerge(
int iBestLvl = 0; /* Level offering the most input segments */
int nBest = 0; /* Number of input segments on best level */
- /* Set iBestLvl to the level to read input segments from. */
+ /* Set iBestLvl to the level to read input segments from. Or to -1 if
+ ** there is no level suitable to merge segments from. */
assert( pStruct->nLevel>0 );
for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
if( pLvl->nMerge ){
if( pLvl->nMerge>nBest ){
iBestLvl = iLvl;
- nBest = pLvl->nMerge;
+ nBest = nMin;
}
break;
}
@@ -236995,22 +243469,18 @@ static int fts5IndexMerge(
iBestLvl = iLvl;
}
}
-
- /* If nBest is still 0, then the index must be empty. */
-#ifdef SQLITE_DEBUG
- for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){
- assert( pStruct->aLevel[iLvl].nSeg==0 );
+ if( nBest<nMin ){
+ iBestLvl = fts5IndexFindDeleteMerge(p, pStruct);
}
-#endif
- if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){
- break;
- }
+ if( iBestLvl<0 ) break;
bRet = 1;
fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){
fts5StructurePromote(p, iBestLvl+1, pStruct);
}
+
+ if( nMin==1 ) nMin = 2;
}
*ppStruct = pStruct;
return bRet;
@@ -237176,7 +243646,7 @@ static void fts5SecureDeleteOverflow(
pLeaf = 0;
}else if( bDetailNone ){
break;
- }else if( iNext>=pLeaf->szLeaf || iNext<4 ){
+ }else if( iNext>=pLeaf->szLeaf || pLeaf->nn<pLeaf->szLeaf || iNext<4 ){
p->rc = FTS5_CORRUPT;
break;
}else{
@@ -237195,9 +243665,13 @@ static void fts5SecureDeleteOverflow(
int i1 = pLeaf->szLeaf;
int i2 = 0;
+ i1 += fts5GetVarint32(&aPg[i1], iFirst);
+ if( iFirst<iNext ){
+ p->rc = FTS5_CORRUPT;
+ break;
+ }
aIdx = sqlite3Fts5MallocZero(&p->rc, (pLeaf->nn-pLeaf->szLeaf)+2);
if( aIdx==0 ) break;
- i1 += fts5GetVarint32(&aPg[i1], iFirst);
i2 = sqlite3Fts5PutVarint(aIdx, iFirst-nShift);
if( i1<pLeaf->nn ){
memcpy(&aIdx[i2], &aPg[i1], pLeaf->nn-i1);
@@ -237242,7 +243716,6 @@ static void fts5DoSecureDelete(
int iPgIdx = pSeg->pLeaf->szLeaf;
u64 iDelta = 0;
- u64 iNextDelta = 0;
int iNextOff = 0;
int iOff = 0;
int nIdx = 0;
@@ -237250,8 +243723,6 @@ static void fts5DoSecureDelete(
int bLastInDoclist = 0;
int iIdx = 0;
int iStart = 0;
- int iKeyOff = 0;
- int iPrevKeyOff = 0;
int iDelKeyOff = 0; /* Offset of deleted key, if any */
nIdx = nPg-iPgIdx;
@@ -237276,10 +243747,21 @@ static void fts5DoSecureDelete(
** This block sets the following variables:
**
** iStart:
+ ** The offset of the first byte of the rowid or delta-rowid
+ ** value for the doclist entry being removed.
+ **
** iDelta:
+ ** The value of the rowid or delta-rowid value for the doclist
+ ** entry being removed.
+ **
+ ** iNextOff:
+ ** The offset of the next entry following the position list
+ ** for the one being removed. If the position list for this
+ ** entry overflows onto the next leaf page, this value will be
+ ** greater than pLeaf->szLeaf.
*/
{
- int iSOP;
+ int iSOP; /* Start-Of-Position-list */
if( pSeg->iLeafPgno==pSeg->iTermLeafPgno ){
iStart = pSeg->iTermLeafOffset;
}else{
@@ -237315,47 +243797,81 @@ static void fts5DoSecureDelete(
}
iOff = iStart;
+
+ /* If the position-list for the entry being removed flows over past
+ ** the end of this page, delete the portion of the position-list on the
+ ** next page and beyond.
+ **
+ ** Set variable bLastInDoclist to true if this entry happens
+ ** to be the last rowid in the doclist for its term. */
if( iNextOff>=iPgIdx ){
int pgno = pSeg->iLeafPgno+1;
fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist);
iNextOff = iPgIdx;
- }else{
- /* Set bLastInDoclist to true if the entry being removed is the last
- ** in its doclist. */
- for(iIdx=0, iKeyOff=0; iIdx<nIdx; /* no-op */){
- u32 iVal = 0;
- iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
- iKeyOff += iVal;
- if( iKeyOff==iNextOff ){
- bLastInDoclist = 1;
+ }
+
+ if( pSeg->bDel==0 ){
+ if( iNextOff!=iPgIdx ){
+ /* Loop through the page-footer. If iNextOff (offset of the
+ ** entry following the one we are removing) is equal to the
+ ** offset of a key on this page, then the entry is the last
+ ** in its doclist. */
+ int iKeyOff = 0;
+ for(iIdx=0; iIdx<nIdx; /* no-op */){
+ u32 iVal = 0;
+ iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
+ iKeyOff += iVal;
+ if( iKeyOff==iNextOff ){
+ bLastInDoclist = 1;
+ }
}
}
- }
- if( fts5GetU16(&aPg[0])==iStart && (bLastInDoclist||iNextOff==iPgIdx) ){
- fts5PutU16(&aPg[0], 0);
+ /* If this is (a) the first rowid on a page and (b) is not followed by
+ ** another position list on the same page, set the "first-rowid" field
+ ** of the header to 0. */
+ if( fts5GetU16(&aPg[0])==iStart && (bLastInDoclist || iNextOff==iPgIdx) ){
+ fts5PutU16(&aPg[0], 0);
+ }
}
- if( bLastInDoclist==0 ){
+ if( pSeg->bDel ){
+ iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta);
+ aPg[iOff++] = 0x01;
+ }else if( bLastInDoclist==0 ){
if( iNextOff!=iPgIdx ){
+ u64 iNextDelta = 0;
iNextOff += fts5GetVarint(&aPg[iNextOff], &iNextDelta);
iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta + iNextDelta);
}
}else if(
- iStart==pSeg->iTermLeafOffset && pSeg->iLeafPgno==pSeg->iTermLeafPgno
+ pSeg->iLeafPgno==pSeg->iTermLeafPgno
+ && iStart==pSeg->iTermLeafOffset
){
/* The entry being removed was the only position list in its
** doclist. Therefore the term needs to be removed as well. */
int iKey = 0;
- for(iIdx=0, iKeyOff=0; iIdx<nIdx; iKey++){
+ int iKeyOff = 0;
+
+ /* Set iKeyOff to the offset of the term that will be removed - the
+ ** last offset in the footer that is not greater than iStart. */
+ for(iIdx=0; iIdx<nIdx; iKey++){
u32 iVal = 0;
iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
if( (iKeyOff+iVal)>(u32)iStart ) break;
iKeyOff += iVal;
}
+ assert_nc( iKey>=1 );
+ /* Set iDelKeyOff to the value of the footer entry to remove from
+ ** the page. */
iDelKeyOff = iOff = iKeyOff;
+
if( iNextOff!=iPgIdx ){
+ /* This is the only position-list associated with the term, and there
+ ** is another term following it on this page. So the subsequent term
+ ** needs to be moved to replace the term associated with the entry
+ ** being removed. */
int nPrefix = 0;
int nSuffix = 0;
int nPrefix2 = 0;
@@ -237380,7 +243896,9 @@ static void fts5DoSecureDelete(
iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix);
}
iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix);
- if( nPrefix2>nPrefix ){
+ if( nPrefix2>pSeg->term.n ){
+ p->rc = FTS5_CORRUPT;
+ }else if( nPrefix2>nPrefix ){
memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix);
iOff += (nPrefix2-nPrefix);
}
@@ -237390,80 +243908,88 @@ static void fts5DoSecureDelete(
}
}
}else if( iStart==4 ){
- int iPgno;
-
- assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno );
- /* The entry being removed may be the only position list in
- ** its doclist. */
- for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){
- Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno));
- int bEmpty = (pPg && pPg->nn==4);
- fts5DataRelease(pPg);
- if( bEmpty==0 ) break;
- }
-
- if( iPgno==pSeg->iTermLeafPgno ){
- i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno);
- Fts5Data *pTerm = fts5DataRead(p, iId);
- if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){
- u8 *aTermIdx = &pTerm->p[pTerm->szLeaf];
- int nTermIdx = pTerm->nn - pTerm->szLeaf;
- int iTermIdx = 0;
- int iTermOff = 0;
-
- while( 1 ){
- u32 iVal = 0;
- int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal);
- iTermOff += iVal;
- if( (iTermIdx+nByte)>=nTermIdx ) break;
- iTermIdx += nByte;
- }
- nTermIdx = iTermIdx;
+ int iPgno;
+
+ assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno );
+ /* The entry being removed may be the only position list in
+ ** its doclist. */
+ for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){
+ Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno));
+ int bEmpty = (pPg && pPg->nn==4);
+ fts5DataRelease(pPg);
+ if( bEmpty==0 ) break;
+ }
+
+ if( iPgno==pSeg->iTermLeafPgno ){
+ i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno);
+ Fts5Data *pTerm = fts5DataRead(p, iId);
+ if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){
+ u8 *aTermIdx = &pTerm->p[pTerm->szLeaf];
+ int nTermIdx = pTerm->nn - pTerm->szLeaf;
+ int iTermIdx = 0;
+ int iTermOff = 0;
+
+ while( 1 ){
+ u32 iVal = 0;
+ int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal);
+ iTermOff += iVal;
+ if( (iTermIdx+nByte)>=nTermIdx ) break;
+ iTermIdx += nByte;
+ }
+ nTermIdx = iTermIdx;
- memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx);
- fts5PutU16(&pTerm->p[2], iTermOff);
+ memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx);
+ fts5PutU16(&pTerm->p[2], iTermOff);
- fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx);
- if( nTermIdx==0 ){
- fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno);
- }
+ fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx);
+ if( nTermIdx==0 ){
+ fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno);
}
- fts5DataRelease(pTerm);
}
+ fts5DataRelease(pTerm);
}
+ }
- if( p->rc==SQLITE_OK ){
- const int nMove = nPg - iNextOff;
- int nShift = 0;
+ /* Assuming no error has occurred, this block does final edits to the
+ ** leaf page before writing it back to disk. Input variables are:
+ **
+ ** nPg: Total initial size of leaf page.
+ ** iPgIdx: Initial offset of page footer.
+ **
+ ** iOff: Offset to move data to
+ ** iNextOff: Offset to move data from
+ */
+ if( p->rc==SQLITE_OK ){
+ const int nMove = nPg - iNextOff; /* Number of bytes to move */
+ int nShift = iNextOff - iOff; /* Distance to move them */
- memmove(&aPg[iOff], &aPg[iNextOff], nMove);
- iPgIdx -= (iNextOff - iOff);
- nPg = iPgIdx;
- fts5PutU16(&aPg[2], iPgIdx);
+ int iPrevKeyOut = 0;
+ int iKeyIn = 0;
- nShift = iNextOff - iOff;
- for(iIdx=0, iKeyOff=0, iPrevKeyOff=0; iIdx<nIdx; /* no-op */){
- u32 iVal = 0;
- iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
- iKeyOff += iVal;
- if( iKeyOff!=iDelKeyOff ){
- if( iKeyOff>iOff ){
- iKeyOff -= nShift;
- nShift = 0;
- }
- nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOff - iPrevKeyOff);
- iPrevKeyOff = iKeyOff;
- }
- }
+ memmove(&aPg[iOff], &aPg[iNextOff], nMove);
+ iPgIdx -= nShift;
+ nPg = iPgIdx;
+ fts5PutU16(&aPg[2], iPgIdx);
- if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){
- fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno);
+ for(iIdx=0; iIdx<nIdx; /* no-op */){
+ u32 iVal = 0;
+ iIdx += fts5GetVarint32(&aIdx[iIdx], iVal);
+ iKeyIn += iVal;
+ if( iKeyIn!=iDelKeyOff ){
+ int iKeyOut = (iKeyIn - (iKeyIn>iOff ? nShift : 0));
+ nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOut - iPrevKeyOut);
+ iPrevKeyOut = iKeyOut;
}
+ }
- assert_nc( nPg>4 || fts5GetU16(aPg)==0 );
- fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg,nPg);
+ if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){
+ fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno);
}
- sqlite3_free(aIdx);
+
+ assert_nc( nPg>4 || fts5GetU16(aPg)==0 );
+ fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg, nPg);
+ }
+ sqlite3_free(aIdx);
}
/*
@@ -237475,10 +244001,10 @@ static void fts5FlushSecureDelete(
Fts5Index *p,
Fts5Structure *pStruct,
const char *zTerm,
+ int nTerm,
i64 iRowid
){
const int f = FTS5INDEX_QUERY_SKIPHASH;
- int nTerm = (int)strlen(zTerm);
Fts5Iter *pIter = 0; /* Used to find term instance */
fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter);
@@ -237517,184 +244043,197 @@ static void fts5FlushOneHash(Fts5Index *p){
/* Obtain a reference to the index structure and allocate a new segment-id
** for the new level-0 segment. */
pStruct = fts5StructureRead(p);
- iSegid = fts5AllocateSegid(p, pStruct);
fts5StructureInvalidate(p);
- if( iSegid ){
- const int pgsz = p->pConfig->pgsz;
- int eDetail = p->pConfig->eDetail;
- int bSecureDelete = p->pConfig->bSecureDelete;
- Fts5StructureSegment *pSeg; /* New segment within pStruct */
- Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
- Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
-
- Fts5SegWriter writer;
- fts5WriteInit(p, &writer, iSegid);
-
- pBuf = &writer.writer.buf;
- pPgidx = &writer.writer.pgidx;
-
- /* fts5WriteInit() should have initialized the buffers to (most likely)
- ** the maximum space required. */
- assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
- assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+ if( sqlite3Fts5HashIsEmpty(pHash)==0 ){
+ iSegid = fts5AllocateSegid(p, pStruct);
+ if( iSegid ){
+ const int pgsz = p->pConfig->pgsz;
+ int eDetail = p->pConfig->eDetail;
+ int bSecureDelete = p->pConfig->bSecureDelete;
+ Fts5StructureSegment *pSeg; /* New segment within pStruct */
+ Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
+ Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
+
+ Fts5SegWriter writer;
+ fts5WriteInit(p, &writer, iSegid);
+
+ pBuf = &writer.writer.buf;
+ pPgidx = &writer.writer.pgidx;
+
+ /* fts5WriteInit() should have initialized the buffers to (most likely)
+ ** the maximum space required. */
+ assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+ assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
+
+ /* Begin scanning through hash table entries. This loop runs once for each
+ ** term/doclist currently stored within the hash table. */
+ if( p->rc==SQLITE_OK ){
+ p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
+ }
+ while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
+ const char *zTerm; /* Buffer containing term */
+ int nTerm; /* Size of zTerm in bytes */
+ const u8 *pDoclist; /* Pointer to doclist for this term */
+ int nDoclist; /* Size of doclist in bytes */
+
+ /* Get the term and doclist for this entry. */
+ sqlite3Fts5HashScanEntry(pHash, &zTerm, &nTerm, &pDoclist, &nDoclist);
+ if( bSecureDelete==0 ){
+ fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
+ if( p->rc!=SQLITE_OK ) break;
+ assert( writer.bFirstRowidInPage==0 );
+ }
- /* Begin scanning through hash table entries. This loop runs once for each
- ** term/doclist currently stored within the hash table. */
- if( p->rc==SQLITE_OK ){
- p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
- }
- while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
- const char *zTerm; /* Buffer containing term */
- int nTerm; /* Size of zTerm in bytes */
- const u8 *pDoclist; /* Pointer to doclist for this term */
- int nDoclist; /* Size of doclist in bytes */
-
- /* Get the term and doclist for this entry. */
- sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
- nTerm = (int)strlen(zTerm);
- if( bSecureDelete==0 ){
- fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
- if( p->rc!=SQLITE_OK ) break;
- assert( writer.bFirstRowidInPage==0 );
- }
-
- if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
- /* The entire doclist will fit on the current leaf. */
- fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
- }else{
- int bTermWritten = !bSecureDelete;
- i64 iRowid = 0;
- i64 iPrev = 0;
- int iOff = 0;
-
- /* The entire doclist will not fit on this leaf. The following
- ** loop iterates through the poslists that make up the current
- ** doclist. */
- while( p->rc==SQLITE_OK && iOff<nDoclist ){
- u64 iDelta = 0;
- iOff += fts5GetVarint(&pDoclist[iOff], &iDelta);
- iRowid += iDelta;
-
- /* If in secure delete mode, and if this entry in the poslist is
- ** in fact a delete, then edit the existing segments directly
- ** using fts5FlushSecureDelete(). */
- if( bSecureDelete ){
- if( eDetail==FTS5_DETAIL_NONE ){
- if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
- fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
- iOff++;
+ if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
+ /* The entire doclist will fit on the current leaf. */
+ fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
+ }else{
+ int bTermWritten = !bSecureDelete;
+ i64 iRowid = 0;
+ i64 iPrev = 0;
+ int iOff = 0;
+
+ /* The entire doclist will not fit on this leaf. The following
+ ** loop iterates through the poslists that make up the current
+ ** doclist. */
+ while( p->rc==SQLITE_OK && iOff<nDoclist ){
+ u64 iDelta = 0;
+ iOff += fts5GetVarint(&pDoclist[iOff], &iDelta);
+ iRowid += iDelta;
+
+ /* If in secure delete mode, and if this entry in the poslist is
+ ** in fact a delete, then edit the existing segments directly
+ ** using fts5FlushSecureDelete(). */
+ if( bSecureDelete ){
+ if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
+ fts5FlushSecureDelete(p, pStruct, zTerm, nTerm, iRowid);
+ iOff++;
+ if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
+ iOff++;
+ nDoclist = 0;
+ }else{
+ continue;
+ }
+ }
+ }else if( (pDoclist[iOff] & 0x01) ){
+ fts5FlushSecureDelete(p, pStruct, zTerm, nTerm, iRowid);
+ if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
iOff++;
- nDoclist = 0;
- }else{
continue;
}
}
- }else if( (pDoclist[iOff] & 0x01) ){
- fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
- if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
- iOff++;
- continue;
- }
}
- }
- if( p->rc==SQLITE_OK && bTermWritten==0 ){
- fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
- bTermWritten = 1;
- assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 );
- }
+ if( p->rc==SQLITE_OK && bTermWritten==0 ){
+ fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
+ bTermWritten = 1;
+ assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 );
+ }
- if( writer.bFirstRowidInPage ){
- fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
- writer.bFirstRowidInPage = 0;
- fts5WriteDlidxAppend(p, &writer, iRowid);
- }else{
- pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid-iPrev);
- }
- if( p->rc!=SQLITE_OK ) break;
- assert( pBuf->n<=pBuf->nSpace );
- iPrev = iRowid;
+ if( writer.bFirstRowidInPage ){
+ fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
+ writer.bFirstRowidInPage = 0;
+ fts5WriteDlidxAppend(p, &writer, iRowid);
+ }else{
+ u64 iRowidDelta = (u64)iRowid - (u64)iPrev;
+ pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowidDelta);
+ }
+ if( p->rc!=SQLITE_OK ) break;
+ assert( pBuf->n<=pBuf->nSpace );
+ iPrev = iRowid;
- if( eDetail==FTS5_DETAIL_NONE ){
- if( iOff<nDoclist && pDoclist[iOff]==0 ){
- pBuf->p[pBuf->n++] = 0;
- iOff++;
+ if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0 ){
pBuf->p[pBuf->n++] = 0;
iOff++;
+ if( iOff<nDoclist && pDoclist[iOff]==0 ){
+ pBuf->p[pBuf->n++] = 0;
+ iOff++;
+ }
+ }
+ if( (pBuf->n + pPgidx->n)>=pgsz ){
+ fts5WriteFlushLeaf(p, &writer);
}
- }
- if( (pBuf->n + pPgidx->n)>=pgsz ){
- fts5WriteFlushLeaf(p, &writer);
- }
- }else{
- int bDummy;
- int nPos;
- int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy);
- nCopy += nPos;
- if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
- /* The entire poslist will fit on the current leaf. So copy
- ** it in one go. */
- fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
}else{
- /* The entire poslist will not fit on this leaf. So it needs
- ** to be broken into sections. The only qualification being
- ** that each varint must be stored contiguously. */
- const u8 *pPoslist = &pDoclist[iOff];
- int iPos = 0;
- while( p->rc==SQLITE_OK ){
- int nSpace = pgsz - pBuf->n - pPgidx->n;
- int n = 0;
- if( (nCopy - iPos)<=nSpace ){
- n = nCopy - iPos;
- }else{
- n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
- }
- assert( n>0 );
- fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
- iPos += n;
- if( (pBuf->n + pPgidx->n)>=pgsz ){
- fts5WriteFlushLeaf(p, &writer);
+ int bDel = 0;
+ int nPos = 0;
+ int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDel);
+ if( bDel && bSecureDelete ){
+ fts5BufferAppendVarint(&p->rc, pBuf, nPos*2);
+ iOff += nCopy;
+ nCopy = nPos;
+ }else{
+ nCopy += nPos;
+ }
+ if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
+ /* The entire poslist will fit on the current leaf. So copy
+ ** it in one go. */
+ fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
+ }else{
+ /* The entire poslist will not fit on this leaf. So it needs
+ ** to be broken into sections. The only qualification being
+ ** that each varint must be stored contiguously. */
+ const u8 *pPoslist = &pDoclist[iOff];
+ int iPos = 0;
+ while( p->rc==SQLITE_OK ){
+ int nSpace = pgsz - pBuf->n - pPgidx->n;
+ int n = 0;
+ if( (nCopy - iPos)<=nSpace ){
+ n = nCopy - iPos;
+ }else{
+ n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
+ }
+ assert( n>0 );
+ fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
+ iPos += n;
+ if( (pBuf->n + pPgidx->n)>=pgsz ){
+ fts5WriteFlushLeaf(p, &writer);
+ }
+ if( iPos>=nCopy ) break;
}
- if( iPos>=nCopy ) break;
}
+ iOff += nCopy;
}
- iOff += nCopy;
}
}
- }
-
- /* TODO2: Doclist terminator written here. */
- /* pBuf->p[pBuf->n++] = '\0'; */
- assert( pBuf->n<=pBuf->nSpace );
- if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
- }
- sqlite3Fts5HashClear(pHash);
- fts5WriteFinish(p, &writer, &pgnoLast);
- assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 );
- if( pgnoLast>0 ){
- /* Update the Fts5Structure. It is written back to the database by the
- ** fts5StructureRelease() call below. */
- if( pStruct->nLevel==0 ){
- fts5StructureAddLevel(&p->rc, &pStruct);
+ /* TODO2: Doclist terminator written here. */
+ /* pBuf->p[pBuf->n++] = '\0'; */
+ assert( pBuf->n<=pBuf->nSpace );
+ if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
}
- fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
- if( p->rc==SQLITE_OK ){
- pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
- pSeg->iSegid = iSegid;
- pSeg->pgnoFirst = 1;
- pSeg->pgnoLast = pgnoLast;
- pStruct->nSegment++;
+ fts5WriteFinish(p, &writer, &pgnoLast);
+
+ assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 );
+ if( pgnoLast>0 ){
+ /* Update the Fts5Structure. It is written back to the database by the
+ ** fts5StructureRelease() call below. */
+ if( pStruct->nLevel==0 ){
+ fts5StructureAddLevel(&p->rc, &pStruct);
+ }
+ fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
+ if( p->rc==SQLITE_OK ){
+ pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
+ pSeg->iSegid = iSegid;
+ pSeg->pgnoFirst = 1;
+ pSeg->pgnoLast = pgnoLast;
+ if( pStruct->nOriginCntr>0 ){
+ pSeg->iOrigin1 = pStruct->nOriginCntr;
+ pSeg->iOrigin2 = pStruct->nOriginCntr;
+ pSeg->nEntry = p->nPendingRow;
+ pStruct->nOriginCntr++;
+ }
+ pStruct->nSegment++;
+ }
+ fts5StructurePromote(p, 0, pStruct);
}
- fts5StructurePromote(p, 0, pStruct);
}
}
- fts5IndexAutomerge(p, &pStruct, pgnoLast);
+ fts5IndexAutomerge(p, &pStruct, pgnoLast + p->nContentlessDelete);
fts5IndexCrisismerge(p, &pStruct);
fts5StructureWrite(p, pStruct);
fts5StructureRelease(pStruct);
@@ -237705,10 +244244,21 @@ static void fts5FlushOneHash(Fts5Index *p){
*/
static void fts5IndexFlush(Fts5Index *p){
/* Unless it is empty, flush the hash table to disk */
- if( p->nPendingData ){
+ if( p->flushRc ){
+ p->rc = p->flushRc;
+ return;
+ }
+ if( p->nPendingData || p->nContentlessDelete ){
assert( p->pHash );
- p->nPendingData = 0;
fts5FlushOneHash(p);
+ if( p->rc==SQLITE_OK ){
+ sqlite3Fts5HashClear(p->pHash);
+ p->nPendingData = 0;
+ p->nPendingRow = 0;
+ p->nContentlessDelete = 0;
+ }else if( p->nPendingData || p->nContentlessDelete ){
+ p->flushRc = p->rc;
+ }
}
}
@@ -237724,17 +244274,22 @@ static Fts5Structure *fts5IndexOptimizeStruct(
/* Figure out if this structure requires optimization. A structure does
** not require optimization if either:
**
- ** + it consists of fewer than two segments, or
- ** + all segments are on the same level, or
- ** + all segments except one are currently inputs to a merge operation.
+ ** 1. it consists of fewer than two segments, or
+ ** 2. all segments are on the same level, or
+ ** 3. all segments except one are currently inputs to a merge operation.
**
- ** In the first case, return NULL. In the second, increment the ref-count
- ** on *pStruct and return a copy of the pointer to it.
+ ** In the first case, if there are no tombstone hash pages, return NULL. In
+ ** the second, increment the ref-count on *pStruct and return a copy of the
+ ** pointer to it.
*/
- if( nSeg<2 ) return 0;
+ if( nSeg==0 ) return 0;
for(i=0; i<pStruct->nLevel; i++){
int nThis = pStruct->aLevel[i].nSeg;
- if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){
+ int nMerge = pStruct->aLevel[i].nMerge;
+ if( nThis>0 && (nThis==nSeg || (nThis==nSeg-1 && nMerge==nThis)) ){
+ if( nSeg==1 && nThis==1 && pStruct->aLevel[i].aSeg[0].nPgTombstone==0 ){
+ return 0;
+ }
fts5StructureRef(pStruct);
return pStruct;
}
@@ -237750,6 +244305,7 @@ static Fts5Structure *fts5IndexOptimizeStruct(
pNew->nLevel = MIN(pStruct->nLevel+1, FTS5_MAX_LEVEL);
pNew->nRef = 1;
pNew->nWriteCounter = pStruct->nWriteCounter;
+ pNew->nOriginCntr = pStruct->nOriginCntr;
pLvl = &pNew->aLevel[pNew->nLevel-1];
pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte);
if( pLvl->aSeg ){
@@ -237780,7 +244336,9 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){
assert( p->rc==SQLITE_OK );
fts5IndexFlush(p);
+ assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 );
pStruct = fts5StructureRead(p);
+ assert( p->rc!=SQLITE_OK || pStruct!=0 );
fts5StructureInvalidate(p);
if( pStruct ){
@@ -237809,7 +244367,10 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){
** INSERT command.
*/
static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
- Fts5Structure *pStruct = fts5StructureRead(p);
+ Fts5Structure *pStruct = 0;
+
+ fts5IndexFlush(p);
+ pStruct = fts5StructureRead(p);
if( pStruct ){
int nMin = p->pConfig->nUsermerge;
fts5StructureInvalidate(p);
@@ -237817,7 +244378,7 @@ static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct);
fts5StructureRelease(pStruct);
pStruct = pNew;
- nMin = 2;
+ nMin = 1;
nMerge = nMerge*-1;
}
if( pStruct && pStruct->nLevel ){
@@ -238183,7 +244744,7 @@ static void fts5SetupPrefixIter(
u8 *pToken, /* Buffer containing prefix to match */
int nToken, /* Size of buffer pToken in bytes */
Fts5Colset *pColset, /* Restrict matches to these columns */
- Fts5Iter **ppIter /* OUT: New iterator */
+ Fts5Iter **ppIter /* OUT: New iterator */
){
Fts5Structure *pStruct;
Fts5Buffer *aBuf;
@@ -238204,8 +244765,9 @@ static void fts5SetupPrefixIter(
aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf);
pStruct = fts5StructureRead(p);
+ assert( p->rc!=SQLITE_OK || (aBuf && pStruct) );
- if( aBuf && pStruct ){
+ if( p->rc==SQLITE_OK ){
const int flags = FTS5INDEX_QUERY_SCAN
| FTS5INDEX_QUERY_SKIPEMPTY
| FTS5INDEX_QUERY_NOOUTPUT;
@@ -238217,6 +244779,12 @@ static void fts5SetupPrefixIter(
int bNewTerm = 1;
memset(&doclist, 0, sizeof(doclist));
+
+ /* If iIdx is non-zero, then it is the number of a prefix-index for
+ ** prefixes 1 character longer than the prefix being queried for. That
+ ** index contains all the doclists required, except for the one
+ ** corresponding to the prefix itself. That one is extracted from the
+ ** main term index here. */
if( iIdx!=0 ){
int dummy = 0;
const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT;
@@ -238240,6 +244808,7 @@ static void fts5SetupPrefixIter(
pToken[0] = FTS5_MAIN_PREFIX + iIdx;
fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1);
fts5IterSetOutputCb(&p->rc, p1);
+
for( /* no-op */ ;
fts5MultiIterEof(p, p1)==0;
fts5MultiIterNext2(p, p1, &bNewTerm)
@@ -238255,7 +244824,6 @@ static void fts5SetupPrefixIter(
}
if( p1->base.nData==0 ) continue;
-
if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){
for(i=0; p->rc==SQLITE_OK && doclist.n; i++){
int i1 = i*nMerge;
@@ -238294,7 +244862,7 @@ static void fts5SetupPrefixIter(
}
fts5MultiIterFree(p1);
- pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING);
+ pData = fts5IdxMalloc(p, sizeof(*pData)+doclist.n+FTS5_DATA_ZERO_PADDING);
if( pData ){
pData->p = (u8*)&pData[1];
pData->nn = pData->szLeaf = doclist.n;
@@ -238331,6 +244899,9 @@ static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){
p->iWriteRowid = iRowid;
p->bDelete = bDelete;
+ if( bDelete==0 ){
+ p->nPendingRow++;
+ }
return fts5IndexReturn(p);
}
@@ -238368,6 +244939,9 @@ static int sqlite3Fts5IndexReinit(Fts5Index *p){
fts5StructureInvalidate(p);
fts5IndexDiscardData(p);
memset(&s, 0, sizeof(Fts5Structure));
+ if( p->pConfig->bContentlessDelete ){
+ s.nOriginCntr = 1;
+ }
fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0);
fts5StructureWrite(p, &s);
return fts5IndexReturn(p);
@@ -238431,6 +245005,7 @@ static int sqlite3Fts5IndexClose(Fts5Index *p){
sqlite3_finalize(p->pIdxWriter);
sqlite3_finalize(p->pIdxDeleter);
sqlite3_finalize(p->pIdxSelect);
+ sqlite3_finalize(p->pIdxNextSelect);
sqlite3_finalize(p->pDataVersion);
sqlite3_finalize(p->pDeleteFromIdx);
sqlite3Fts5HashFree(p->pHash);
@@ -238527,6 +245102,454 @@ static int sqlite3Fts5IndexWrite(
}
/*
+** pToken points to a buffer of size nToken bytes containing a search
+** term, including the index number at the start, used on a tokendata=1
+** table. This function returns true if the term in buffer pBuf matches
+** token pToken/nToken.
+*/
+static int fts5IsTokendataPrefix(
+ Fts5Buffer *pBuf,
+ const u8 *pToken,
+ int nToken
+){
+ return (
+ pBuf->n>=nToken
+ && 0==memcmp(pBuf->p, pToken, nToken)
+ && (pBuf->n==nToken || pBuf->p[nToken]==0x00)
+ );
+}
+
+/*
+** Ensure the segment-iterator passed as the only argument points to EOF.
+*/
+static void fts5SegIterSetEOF(Fts5SegIter *pSeg){
+ fts5DataRelease(pSeg->pLeaf);
+ pSeg->pLeaf = 0;
+}
+
+/*
+** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an
+** array of these for each row it visits. Or, for an iterator used by an
+** "ORDER BY rank" query, it accumulates an array of these for the entire
+** query.
+**
+** Each instance in the array indicates the iterator (and therefore term)
+** associated with position iPos of rowid iRowid. This is used by the
+** xInstToken() API.
+*/
+struct Fts5TokenDataMap {
+ i64 iRowid; /* Row this token is located in */
+ i64 iPos; /* Position of token */
+ int iIter; /* Iterator token was read from */
+};
+
+/*
+** An object used to supplement Fts5Iter for tokendata=1 iterators.
+*/
+struct Fts5TokenDataIter {
+ int nIter;
+ int nIterAlloc;
+
+ int nMap;
+ int nMapAlloc;
+ Fts5TokenDataMap *aMap;
+
+ Fts5PoslistReader *aPoslistReader;
+ int *aPoslistToIter;
+ Fts5Iter *apIter[1];
+};
+
+/*
+** This function appends iterator pAppend to Fts5TokenDataIter pIn and
+** returns the result.
+*/
+static Fts5TokenDataIter *fts5AppendTokendataIter(
+ Fts5Index *p, /* Index object (for error code) */
+ Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */
+ Fts5Iter *pAppend /* Append this iterator */
+){
+ Fts5TokenDataIter *pRet = pIn;
+
+ if( p->rc==SQLITE_OK ){
+ if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){
+ int nAlloc = pIn ? pIn->nIterAlloc*2 : 16;
+ int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter);
+ Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte);
+
+ if( pNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ }else{
+ if( pIn==0 ) memset(pNew, 0, nByte);
+ pRet = pNew;
+ pNew->nIterAlloc = nAlloc;
+ }
+ }
+ }
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pAppend);
+ }else{
+ pRet->apIter[pRet->nIter++] = pAppend;
+ }
+ assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc );
+
+ return pRet;
+}
+
+/*
+** Delete an Fts5TokenDataIter structure and its contents.
+*/
+static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){
+ if( pSet ){
+ int ii;
+ for(ii=0; ii<pSet->nIter; ii++){
+ fts5MultiIterFree(pSet->apIter[ii]);
+ }
+ sqlite3_free(pSet->aPoslistReader);
+ sqlite3_free(pSet->aMap);
+ sqlite3_free(pSet);
+ }
+}
+
+/*
+** Append a mapping to the token-map belonging to object pT.
+*/
+static void fts5TokendataIterAppendMap(
+ Fts5Index *p,
+ Fts5TokenDataIter *pT,
+ int iIter,
+ i64 iRowid,
+ i64 iPos
+){
+ if( p->rc==SQLITE_OK ){
+ if( pT->nMap==pT->nMapAlloc ){
+ int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64;
+ int nByte = nNew * sizeof(Fts5TokenDataMap);
+ Fts5TokenDataMap *aNew;
+
+ aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte);
+ if( aNew==0 ){
+ p->rc = SQLITE_NOMEM;
+ return;
+ }
+
+ pT->aMap = aNew;
+ pT->nMapAlloc = nNew;
+ }
+
+ pT->aMap[pT->nMap].iRowid = iRowid;
+ pT->aMap[pT->nMap].iPos = iPos;
+ pT->aMap[pT->nMap].iIter = iIter;
+ pT->nMap++;
+ }
+}
+
+/*
+** The iterator passed as the only argument must be a tokendata=1 iterator
+** (pIter->pTokenDataIter!=0). This function sets the iterator output
+** variables (pIter->base.*) according to the contents of the current
+** row.
+*/
+static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){
+ int ii;
+ int nHit = 0;
+ i64 iRowid = SMALLEST_INT64;
+ int iMin = 0;
+
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+
+ pIter->base.nData = 0;
+ pIter->base.pData = 0;
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( p->base.bEof==0 ){
+ if( nHit==0 || p->base.iRowid<iRowid ){
+ iRowid = p->base.iRowid;
+ nHit = 1;
+ pIter->base.pData = p->base.pData;
+ pIter->base.nData = p->base.nData;
+ iMin = ii;
+ }else if( p->base.iRowid==iRowid ){
+ nHit++;
+ }
+ }
+ }
+
+ if( nHit==0 ){
+ pIter->base.bEof = 1;
+ }else{
+ int eDetail = pIter->pIndex->pConfig->eDetail;
+ pIter->base.bEof = 0;
+ pIter->base.iRowid = iRowid;
+
+ if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){
+ fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1);
+ }else
+ if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){
+ int nReader = 0;
+ int nByte = 0;
+ i64 iPrev = 0;
+
+ /* Allocate array of iterators if they are not already allocated. */
+ if( pT->aPoslistReader==0 ){
+ pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero(
+ &pIter->pIndex->rc,
+ pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int))
+ );
+ if( pT->aPoslistReader==0 ) return;
+ pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter];
+ }
+
+ /* Populate an iterator for each poslist that will be merged */
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( iRowid==p->base.iRowid ){
+ pT->aPoslistToIter[nReader] = ii;
+ sqlite3Fts5PoslistReaderInit(
+ p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++]
+ );
+ nByte += p->base.nData;
+ }
+ }
+
+ /* Ensure the output buffer is large enough */
+ if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){
+ return;
+ }
+
+ /* Ensure the token-mapping is large enough */
+ if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){
+ int nNew = (pT->nMapAlloc + nByte) * 2;
+ Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc(
+ pT->aMap, nNew*sizeof(Fts5TokenDataMap)
+ );
+ if( aNew==0 ){
+ pIter->pIndex->rc = SQLITE_NOMEM;
+ return;
+ }
+ pT->aMap = aNew;
+ pT->nMapAlloc = nNew;
+ }
+
+ pIter->poslist.n = 0;
+
+ while( 1 ){
+ i64 iMinPos = LARGEST_INT64;
+
+ /* Find smallest position */
+ iMin = 0;
+ for(ii=0; ii<nReader; ii++){
+ Fts5PoslistReader *pReader = &pT->aPoslistReader[ii];
+ if( pReader->bEof==0 ){
+ if( pReader->iPos<iMinPos ){
+ iMinPos = pReader->iPos;
+ iMin = ii;
+ }
+ }
+ }
+
+ /* If all readers were at EOF, break out of the loop. */
+ if( iMinPos==LARGEST_INT64 ) break;
+
+ sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos);
+ sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]);
+
+ if( eDetail==FTS5_DETAIL_FULL ){
+ pT->aMap[pT->nMap].iPos = iMinPos;
+ pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin];
+ pT->aMap[pT->nMap].iRowid = iRowid;
+ pT->nMap++;
+ }
+ }
+
+ pIter->base.pData = pIter->poslist.p;
+ pIter->base.nData = pIter->poslist.n;
+ }
+ }
+}
+
+/*
+** The iterator passed as the only argument must be a tokendata=1 iterator
+** (pIter->pTokenDataIter!=0). This function advances the iterator. If
+** argument bFrom is false, then the iterator is advanced to the next
+** entry. Or, if bFrom is true, it is advanced to the first entry with
+** a rowid of iFrom or greater.
+*/
+static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){
+ int ii;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Iter *p = pT->apIter[ii];
+ if( p->base.bEof==0
+ && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowid<iFrom))
+ ){
+ fts5MultiIterNext(p->pIndex, p, bFrom, iFrom);
+ while( bFrom && p->base.bEof==0
+ && p->base.iRowid<iFrom
+ && p->pIndex->rc==SQLITE_OK
+ ){
+ fts5MultiIterNext(p->pIndex, p, 0, 0);
+ }
+ }
+ }
+
+ fts5IterSetOutputsTokendata(pIter);
+}
+
+/*
+** If the segment-iterator passed as the first argument is at EOF, then
+** set pIter->term to a copy of buffer pTerm.
+*/
+static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){
+ if( pIter && pIter->aSeg[0].pLeaf==0 ){
+ fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p);
+ }
+}
+
+/*
+** This function sets up an iterator to use for a non-prefix query on a
+** tokendata=1 table.
+*/
+static Fts5Iter *fts5SetupTokendataIter(
+ Fts5Index *p, /* FTS index to query */
+ const u8 *pToken, /* Buffer containing query term */
+ int nToken, /* Size of buffer pToken in bytes */
+ Fts5Colset *pColset /* Colset to filter on */
+){
+ Fts5Iter *pRet = 0;
+ Fts5TokenDataIter *pSet = 0;
+ Fts5Structure *pStruct = 0;
+ const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN;
+
+ Fts5Buffer bSeek = {0, 0, 0};
+ Fts5Buffer *pSmall = 0;
+
+ fts5IndexFlush(p);
+ pStruct = fts5StructureRead(p);
+
+ while( p->rc==SQLITE_OK ){
+ Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0;
+ Fts5Iter *pNew = 0;
+ Fts5SegIter *pNewIter = 0;
+ Fts5SegIter *pPrevIter = 0;
+
+ int iLvl, iSeg, ii;
+
+ pNew = fts5MultiIterAlloc(p, pStruct->nSegment);
+ if( pSmall ){
+ fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p);
+ fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0");
+ }else{
+ fts5BufferSet(&p->rc, &bSeek, nToken, pToken);
+ }
+ if( p->rc ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pNew);
+ break;
+ }
+
+ pNewIter = &pNew->aSeg[0];
+ pPrevIter = (pPrev ? &pPrev->aSeg[0] : 0);
+ for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
+ for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){
+ Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
+ int bDone = 0;
+
+ if( pPrevIter ){
+ if( fts5BufferCompare(pSmall, &pPrevIter->term) ){
+ memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter));
+ memset(pPrevIter, 0, sizeof(Fts5SegIter));
+ bDone = 1;
+ }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){
+ fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter);
+ bDone = 1;
+ }
+ }
+
+ if( bDone==0 ){
+ fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter);
+ }
+
+ if( pPrevIter ){
+ if( pPrevIter->pTombArray ){
+ pNewIter->pTombArray = pPrevIter->pTombArray;
+ pNewIter->pTombArray->nRef++;
+ }
+ }else{
+ fts5SegIterAllocTombstone(p, pNewIter);
+ }
+
+ pNewIter++;
+ if( pPrevIter ) pPrevIter++;
+ if( p->rc ) break;
+ }
+ }
+ fts5TokendataSetTermIfEof(pPrev, pSmall);
+
+ pNew->bSkipEmpty = 1;
+ pNew->pColset = pColset;
+ fts5IterSetOutputCb(&p->rc, pNew);
+
+ /* Loop through all segments in the new iterator. Find the smallest
+ ** term that any segment-iterator points to. Iterator pNew will be
+ ** used for this term. Also, set any iterator that points to a term that
+ ** does not match pToken/nToken to point to EOF */
+ pSmall = 0;
+ for(ii=0; ii<pNew->nSeg; ii++){
+ Fts5SegIter *pII = &pNew->aSeg[ii];
+ if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){
+ fts5SegIterSetEOF(pII);
+ }
+ if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){
+ pSmall = &pII->term;
+ }
+ }
+
+ /* If pSmall is still NULL at this point, then the new iterator does
+ ** not point to any terms that match the query. So delete it and break
+ ** out of the loop - all required iterators have been collected. */
+ if( pSmall==0 ){
+ sqlite3Fts5IterClose((Fts5IndexIter*)pNew);
+ break;
+ }
+
+ /* Append this iterator to the set and continue. */
+ pSet = fts5AppendTokendataIter(p, pSet, pNew);
+ }
+
+ if( p->rc==SQLITE_OK && pSet ){
+ int ii;
+ for(ii=0; ii<pSet->nIter; ii++){
+ Fts5Iter *pIter = pSet->apIter[ii];
+ int iSeg;
+ for(iSeg=0; iSeg<pIter->nSeg; iSeg++){
+ pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM;
+ }
+ fts5MultiIterFinishSetup(p, pIter);
+ }
+ }
+
+ if( p->rc==SQLITE_OK ){
+ pRet = fts5MultiIterAlloc(p, 0);
+ }
+ if( pRet ){
+ pRet->pTokenDataIter = pSet;
+ if( pSet ){
+ fts5IterSetOutputsTokendata(pRet);
+ }else{
+ pRet->base.bEof = 1;
+ }
+ }else{
+ fts5TokendataIterDelete(pSet);
+ }
+
+ fts5StructureRelease(pStruct);
+ fts5BufferFree(&bSeek);
+ return pRet;
+}
+
+
+/*
** Open a new iterator to iterate though all rowid that match the
** specified token or token prefix.
*/
@@ -238547,8 +245570,13 @@ static int sqlite3Fts5IndexQuery(
if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){
int iIdx = 0; /* Index to search */
int iPrefixIdx = 0; /* +1 prefix index */
+ int bTokendata = pConfig->bTokendata;
if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken);
+ if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){
+ bTokendata = 0;
+ }
+
/* Figure out which index to search and set iIdx accordingly. If this
** is a prefix query for which there is no prefix index, set iIdx to
** greater than pConfig->nPrefix to indicate that the query will be
@@ -238574,7 +245602,10 @@ static int sqlite3Fts5IndexQuery(
}
}
- if( iIdx<=pConfig->nPrefix ){
+ if( bTokendata && iIdx==0 ){
+ buf.p[0] = '0';
+ pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset);
+ }else if( iIdx<=pConfig->nPrefix ){
/* Straight index lookup */
Fts5Structure *pStruct = fts5StructureRead(p);
buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx);
@@ -238621,7 +245652,11 @@ static int sqlite3Fts5IndexQuery(
static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
assert( pIter->pIndex->rc==SQLITE_OK );
- fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ if( pIter->pTokenDataIter ){
+ fts5TokendataIterNext(pIter, 0, 0);
+ }else{
+ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0);
+ }
return fts5IndexReturn(pIter->pIndex);
}
@@ -238654,7 +245689,11 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){
*/
static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
- fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ if( pIter->pTokenDataIter ){
+ fts5TokendataIterNext(pIter, 1, iMatch);
+ }else{
+ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch);
+ }
return fts5IndexReturn(pIter->pIndex);
}
@@ -238670,12 +245709,106 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){
}
/*
+** This is used by xInstToken() to access the token at offset iOff, column
+** iCol of row iRowid. The token is returned via output variables *ppOut
+** and *pnOut. The iterator passed as the first argument must be a tokendata=1
+** iterator (pIter->pTokenDataIter!=0).
+*/
+static int sqlite3Fts5IterToken(
+ Fts5IndexIter *pIndexIter,
+ i64 iRowid,
+ int iCol,
+ int iOff,
+ const char **ppOut, int *pnOut
+){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+ Fts5TokenDataMap *aMap = pT->aMap;
+ i64 iPos = (((i64)iCol)<<32) + iOff;
+
+ int i1 = 0;
+ int i2 = pT->nMap;
+ int iTest = 0;
+
+ while( i2>i1 ){
+ iTest = (i1 + i2) / 2;
+
+ if( aMap[iTest].iRowid<iRowid ){
+ i1 = iTest+1;
+ }else if( aMap[iTest].iRowid>iRowid ){
+ i2 = iTest;
+ }else{
+ if( aMap[iTest].iPos<iPos ){
+ if( aMap[iTest].iPos<0 ){
+ break;
+ }
+ i1 = iTest+1;
+ }else if( aMap[iTest].iPos>iPos ){
+ i2 = iTest;
+ }else{
+ break;
+ }
+ }
+ }
+
+ if( i2>i1 ){
+ Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter];
+ *ppOut = (const char*)pMap->aSeg[0].term.p+1;
+ *pnOut = pMap->aSeg[0].term.n-1;
+ }
+
+ return SQLITE_OK;
+}
+
+/*
+** Clear any existing entries from the token-map associated with the
+** iterator passed as the only argument.
+*/
+static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ if( pIter && pIter->pTokenDataIter ){
+ pIter->pTokenDataIter->nMap = 0;
+ }
+}
+
+/*
+** Set a token-mapping for the iterator passed as the first argument. This
+** is used in detail=column or detail=none mode when a token is requested
+** using the xInstToken() API. In this case the caller tokenizers the
+** current row and configures the token-mapping via multiple calls to this
+** function.
+*/
+static int sqlite3Fts5IndexIterWriteTokendata(
+ Fts5IndexIter *pIndexIter,
+ const char *pToken, int nToken,
+ i64 iRowid, int iCol, int iOff
+){
+ Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
+ Fts5TokenDataIter *pT = pIter->pTokenDataIter;
+ Fts5Index *p = pIter->pIndex;
+ int ii;
+
+ assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL );
+ assert( pIter->pTokenDataIter );
+
+ for(ii=0; ii<pT->nIter; ii++){
+ Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term;
+ if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break;
+ }
+ if( ii<pT->nIter ){
+ fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff);
+ }
+ return fts5IndexReturn(p);
+}
+
+/*
** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery().
*/
static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){
if( pIndexIter ){
Fts5Iter *pIter = (Fts5Iter*)pIndexIter;
Fts5Index *pIndex = pIter->pIndex;
+ fts5TokendataIterDelete(pIter->pTokenDataIter);
fts5MultiIterFree(pIter);
sqlite3Fts5IndexCloseReader(pIndex);
}
@@ -238759,6 +245892,347 @@ static int sqlite3Fts5IndexLoadConfig(Fts5Index *p){
return fts5IndexReturn(p);
}
+/*
+** Retrieve the origin value that will be used for the segment currently
+** being accumulated in the in-memory hash table when it is flushed to
+** disk. If successful, SQLITE_OK is returned and (*piOrigin) set to
+** the queried value. Or, if an error occurs, an error code is returned
+** and the final value of (*piOrigin) is undefined.
+*/
+static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin){
+ Fts5Structure *pStruct;
+ pStruct = fts5StructureRead(p);
+ if( pStruct ){
+ *piOrigin = pStruct->nOriginCntr;
+ fts5StructureRelease(pStruct);
+ }
+ return fts5IndexReturn(p);
+}
+
+/*
+** Buffer pPg contains a page of a tombstone hash table - one of nPg pages
+** associated with the same segment. This function adds rowid iRowid to
+** the hash table. The caller is required to guarantee that there is at
+** least one free slot on the page.
+**
+** If parameter bForce is false and the hash table is deemed to be full
+** (more than half of the slots are occupied), then non-zero is returned
+** and iRowid not inserted. Or, if bForce is true or if the hash table page
+** is not full, iRowid is inserted and zero returned.
+*/
+static int fts5IndexTombstoneAddToPage(
+ Fts5Data *pPg,
+ int bForce,
+ int nPg,
+ u64 iRowid
+){
+ const int szKey = TOMBSTONE_KEYSIZE(pPg);
+ const int nSlot = TOMBSTONE_NSLOT(pPg);
+ const int nElem = fts5GetU32(&pPg->p[4]);
+ int iSlot = (iRowid / nPg) % nSlot;
+ int nCollide = nSlot;
+
+ if( szKey==4 && iRowid>0xFFFFFFFF ) return 2;
+ if( iRowid==0 ){
+ pPg->p[1] = 0x01;
+ return 0;
+ }
+
+ if( bForce==0 && nElem>=(nSlot/2) ){
+ return 1;
+ }
+
+ fts5PutU32(&pPg->p[4], nElem+1);
+ if( szKey==4 ){
+ u32 *aSlot = (u32*)&pPg->p[8];
+ while( aSlot[iSlot] ){
+ iSlot = (iSlot + 1) % nSlot;
+ if( nCollide--==0 ) return 0;
+ }
+ fts5PutU32((u8*)&aSlot[iSlot], (u32)iRowid);
+ }else{
+ u64 *aSlot = (u64*)&pPg->p[8];
+ while( aSlot[iSlot] ){
+ iSlot = (iSlot + 1) % nSlot;
+ if( nCollide--==0 ) return 0;
+ }
+ fts5PutU64((u8*)&aSlot[iSlot], iRowid);
+ }
+
+ return 0;
+}
+
+/*
+** This function attempts to build a new hash containing all the keys
+** currently in the tombstone hash table for segment pSeg. The new
+** hash will be stored in the nOut buffers passed in array apOut[].
+** All pages of the new hash use key-size szKey (4 or 8).
+**
+** Return 0 if the hash is successfully rebuilt into the nOut pages.
+** Or non-zero if it is not (because one page became overfull). In this
+** case the caller should retry with a larger nOut parameter.
+**
+** Parameter pData1 is page iPg1 of the hash table being rebuilt.
+*/
+static int fts5IndexTombstoneRehash(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */
+ Fts5Data *pData1, /* One page of current hash - or NULL */
+ int iPg1, /* Which page of the current hash is pData1 */
+ int szKey, /* 4 or 8, the keysize */
+ int nOut, /* Number of output pages */
+ Fts5Data **apOut /* Array of output hash pages */
+){
+ int ii;
+ int res = 0;
+
+ /* Initialize the headers of all the output pages */
+ for(ii=0; ii<nOut; ii++){
+ apOut[ii]->p[0] = szKey;
+ fts5PutU32(&apOut[ii]->p[4], 0);
+ }
+
+ /* Loop through the current pages of the hash table. */
+ for(ii=0; res==0 && ii<pSeg->nPgTombstone; ii++){
+ Fts5Data *pData = 0; /* Page ii of the current hash table */
+ Fts5Data *pFree = 0; /* Free this at the end of the loop */
+
+ if( iPg1==ii ){
+ pData = pData1;
+ }else{
+ pFree = pData = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii));
+ }
+
+ if( pData ){
+ int szKeyIn = TOMBSTONE_KEYSIZE(pData);
+ int nSlotIn = (pData->nn - 8) / szKeyIn;
+ int iIn;
+ for(iIn=0; iIn<nSlotIn; iIn++){
+ u64 iVal = 0;
+
+ /* Read the value from slot iIn of the input page into iVal. */
+ if( szKeyIn==4 ){
+ u32 *aSlot = (u32*)&pData->p[8];
+ if( aSlot[iIn] ) iVal = fts5GetU32((u8*)&aSlot[iIn]);
+ }else{
+ u64 *aSlot = (u64*)&pData->p[8];
+ if( aSlot[iIn] ) iVal = fts5GetU64((u8*)&aSlot[iIn]);
+ }
+
+ /* If iVal is not 0 at this point, insert it into the new hash table */
+ if( iVal ){
+ Fts5Data *pPg = apOut[(iVal % nOut)];
+ res = fts5IndexTombstoneAddToPage(pPg, 0, nOut, iVal);
+ if( res ) break;
+ }
+ }
+
+ /* If this is page 0 of the old hash, copy the rowid-0-flag from the
+ ** old hash to the new. */
+ if( ii==0 ){
+ apOut[0]->p[1] = pData->p[1];
+ }
+ }
+ fts5DataRelease(pFree);
+ }
+
+ return res;
+}
+
+/*
+** This is called to rebuild the hash table belonging to segment pSeg.
+** If parameter pData1 is not NULL, then one page of the existing hash table
+** has already been loaded - pData1, which is page iPg1. The key-size for
+** the new hash table is szKey (4 or 8).
+**
+** If successful, the new hash table is not written to disk. Instead,
+** output parameter (*pnOut) is set to the number of pages in the new
+** hash table, and (*papOut) to point to an array of buffers containing
+** the new page data.
+**
+** If an error occurs, an error code is left in the Fts5Index object and
+** both output parameters set to 0 before returning.
+*/
+static void fts5IndexTombstoneRebuild(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */
+ Fts5Data *pData1, /* One page of current hash - or NULL */
+ int iPg1, /* Which page of the current hash is pData1 */
+ int szKey, /* 4 or 8, the keysize */
+ int *pnOut, /* OUT: Number of output pages */
+ Fts5Data ***papOut /* OUT: Output hash pages */
+){
+ const int MINSLOT = 32;
+ int nSlotPerPage = MAX(MINSLOT, (p->pConfig->pgsz - 8) / szKey);
+ int nSlot = 0; /* Number of slots in each output page */
+ int nOut = 0;
+
+ /* Figure out how many output pages (nOut) and how many slots per
+ ** page (nSlot). There are three possibilities:
+ **
+ ** 1. The hash table does not yet exist. In this case the new hash
+ ** table will consist of a single page with MINSLOT slots.
+ **
+ ** 2. The hash table exists but is currently a single page. In this
+ ** case an attempt is made to grow the page to accommodate the new
+ ** entry. The page is allowed to grow up to nSlotPerPage (see above)
+ ** slots.
+ **
+ ** 3. The hash table already consists of more than one page, or of
+ ** a single page already so large that it cannot be grown. In this
+ ** case the new hash consists of (nPg*2+1) pages of nSlotPerPage
+ ** slots each, where nPg is the current number of pages in the
+ ** hash table.
+ */
+ if( pSeg->nPgTombstone==0 ){
+ /* Case 1. */
+ nOut = 1;
+ nSlot = MINSLOT;
+ }else if( pSeg->nPgTombstone==1 ){
+ /* Case 2. */
+ int nElem = (int)fts5GetU32(&pData1->p[4]);
+ assert( pData1 && iPg1==0 );
+ nOut = 1;
+ nSlot = MAX(nElem*4, MINSLOT);
+ if( nSlot>nSlotPerPage ) nOut = 0;
+ }
+ if( nOut==0 ){
+ /* Case 3. */
+ nOut = (pSeg->nPgTombstone * 2 + 1);
+ nSlot = nSlotPerPage;
+ }
+
+ /* Allocate the required array and output pages */
+ while( 1 ){
+ int res = 0;
+ int ii = 0;
+ int szPage = 0;
+ Fts5Data **apOut = 0;
+
+ /* Allocate space for the new hash table */
+ assert( nSlot>=MINSLOT );
+ apOut = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data*) * nOut);
+ szPage = 8 + nSlot*szKey;
+ for(ii=0; ii<nOut; ii++){
+ Fts5Data *pNew = (Fts5Data*)sqlite3Fts5MallocZero(&p->rc,
+ sizeof(Fts5Data)+szPage
+ );
+ if( pNew ){
+ pNew->nn = szPage;
+ pNew->p = (u8*)&pNew[1];
+ apOut[ii] = pNew;
+ }
+ }
+
+ /* Rebuild the hash table. */
+ if( p->rc==SQLITE_OK ){
+ res = fts5IndexTombstoneRehash(p, pSeg, pData1, iPg1, szKey, nOut, apOut);
+ }
+ if( res==0 ){
+ if( p->rc ){
+ fts5IndexFreeArray(apOut, nOut);
+ apOut = 0;
+ nOut = 0;
+ }
+ *pnOut = nOut;
+ *papOut = apOut;
+ break;
+ }
+
+ /* If control flows to here, it was not possible to rebuild the hash
+ ** table. Free all buffers and then try again with more pages. */
+ assert( p->rc==SQLITE_OK );
+ fts5IndexFreeArray(apOut, nOut);
+ nSlot = nSlotPerPage;
+ nOut = nOut*2 + 1;
+ }
+}
+
+
+/*
+** Add a tombstone for rowid iRowid to segment pSeg.
+*/
+static void fts5IndexTombstoneAdd(
+ Fts5Index *p,
+ Fts5StructureSegment *pSeg,
+ u64 iRowid
+){
+ Fts5Data *pPg = 0;
+ int iPg = -1;
+ int szKey = 0;
+ int nHash = 0;
+ Fts5Data **apHash = 0;
+
+ p->nContentlessDelete++;
+
+ if( pSeg->nPgTombstone>0 ){
+ iPg = iRowid % pSeg->nPgTombstone;
+ pPg = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg));
+ if( pPg==0 ){
+ assert( p->rc!=SQLITE_OK );
+ return;
+ }
+
+ if( 0==fts5IndexTombstoneAddToPage(pPg, 0, pSeg->nPgTombstone, iRowid) ){
+ fts5DataWrite(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg), pPg->p, pPg->nn);
+ fts5DataRelease(pPg);
+ return;
+ }
+ }
+
+ /* Have to rebuild the hash table. First figure out the key-size (4 or 8). */
+ szKey = pPg ? TOMBSTONE_KEYSIZE(pPg) : 4;
+ if( iRowid>0xFFFFFFFF ) szKey = 8;
+
+ /* Rebuild the hash table */
+ fts5IndexTombstoneRebuild(p, pSeg, pPg, iPg, szKey, &nHash, &apHash);
+ assert( p->rc==SQLITE_OK || (nHash==0 && apHash==0) );
+
+ /* If all has succeeded, write the new rowid into one of the new hash
+ ** table pages, then write them all out to disk. */
+ if( nHash ){
+ int ii = 0;
+ fts5IndexTombstoneAddToPage(apHash[iRowid % nHash], 1, nHash, iRowid);
+ for(ii=0; ii<nHash; ii++){
+ i64 iTombstoneRowid = FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii);
+ fts5DataWrite(p, iTombstoneRowid, apHash[ii]->p, apHash[ii]->nn);
+ }
+ pSeg->nPgTombstone = nHash;
+ fts5StructureWrite(p, p->pStruct);
+ }
+
+ fts5DataRelease(pPg);
+ fts5IndexFreeArray(apHash, nHash);
+}
+
+/*
+** Add iRowid to the tombstone list of the segment or segments that contain
+** rows from origin iOrigin. Return SQLITE_OK if successful, or an SQLite
+** error code otherwise.
+*/
+static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid){
+ Fts5Structure *pStruct;
+ pStruct = fts5StructureRead(p);
+ if( pStruct ){
+ int bFound = 0; /* True after pSeg->nEntryTombstone incr. */
+ int iLvl;
+ for(iLvl=pStruct->nLevel-1; iLvl>=0; iLvl--){
+ int iSeg;
+ for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){
+ Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg];
+ if( pSeg->iOrigin1<=(u64)iOrigin && pSeg->iOrigin2>=(u64)iOrigin ){
+ if( bFound==0 ){
+ pSeg->nEntryTombstone++;
+ bFound = 1;
+ }
+ fts5IndexTombstoneAdd(p, pSeg, iRowid);
+ }
+ }
+ }
+ fts5StructureRelease(pStruct);
+ }
+ return fts5IndexReturn(p);
+}
/*************************************************************************
**************************************************************************
@@ -238842,7 +246316,9 @@ static int fts5QueryCksum(
int eDetail = p->pConfig->eDetail;
u64 cksum = *pCksum;
Fts5IndexIter *pIter = 0;
- int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter);
+ int rc = sqlite3Fts5IndexQuery(
+ p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter
+ );
while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){
i64 rowid = pIter->iRowid;
@@ -239009,7 +246485,7 @@ static void fts5IndexIntegrityCheckEmpty(
}
static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){
- int iTermOff = 0;
+ i64 iTermOff = 0;
int ii;
Fts5Buffer buf1 = {0,0,0};
@@ -239018,7 +246494,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){
ii = pLeaf->szLeaf;
while( ii<pLeaf->nn && p->rc==SQLITE_OK ){
int res;
- int iOff;
+ i64 iOff;
int nIncr;
ii += fts5GetVarint32(&pLeaf->p[ii], nIncr);
@@ -239310,13 +246786,14 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum
** function only.
*/
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** Decode a segment-data rowid from the %_data table. This function is
** the opposite of macro FTS5_SEGMENT_ROWID().
*/
static void fts5DecodeRowid(
i64 iRowid, /* Rowid from %_data table */
+ int *pbTombstone, /* OUT: Tombstone hash flag */
int *piSegid, /* OUT: Segment id */
int *pbDlidx, /* OUT: Dlidx flag */
int *piHeight, /* OUT: Height */
@@ -239332,13 +246809,16 @@ static void fts5DecodeRowid(
iRowid >>= FTS5_DATA_DLI_B;
*piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1));
+ iRowid >>= FTS5_DATA_ID_B;
+
+ *pbTombstone = (int)(iRowid & 0x0001);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
- int iSegid, iHeight, iPgno, bDlidx; /* Rowid compenents */
- fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno);
+ int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid compenents */
+ fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno);
if( iSegid==0 ){
if( iKey==FTS5_AVERAGES_ROWID ){
@@ -239348,14 +246828,16 @@ static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
}
}
else{
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}",
- bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%s%ssegid=%d h=%d pgno=%d}",
+ bDlidx ? "dlidx " : "",
+ bTomb ? "tombstone " : "",
+ iSegid, iHeight, iPgno
);
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static void fts5DebugStructure(
int *pRc, /* IN/OUT: error code */
Fts5Buffer *pBuf,
@@ -239370,16 +246852,22 @@ static void fts5DebugStructure(
);
for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
- sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}",
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d",
pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast
);
+ if( pSeg->iOrigin1>0 ){
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " origin=%lld..%lld",
+ pSeg->iOrigin1, pSeg->iOrigin2
+ );
+ }
+ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}");
}
sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}");
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** This is part of the fts5_decode() debugging aid.
**
@@ -239404,9 +246892,9 @@ static void fts5DecodeStructure(
fts5DebugStructure(pRc, pBuf, p);
fts5StructureRelease(p);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** This is part of the fts5_decode() debugging aid.
**
@@ -239429,9 +246917,9 @@ static void fts5DecodeAverages(
zSpace = " ";
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** Buffer (a/n) is assumed to contain a list of serialized varints. Read
** each varint and append its string representation to buffer pBuf. Return
@@ -239448,9 +246936,9 @@ static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
}
return iOff;
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The start of buffer (a/n) contains the start of a doclist. The doclist
** may or may not finish within the buffer. This function appends a text
@@ -239483,9 +246971,9 @@ static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){
return iOff;
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** This function is part of the fts5_decode() debugging function. It is
** only ever used with detail=none tables.
@@ -239526,9 +247014,27 @@ static void fts5DecodeRowidList(
sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp);
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
+static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){
+ int ii;
+ fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1);
+ if( *pRc==SQLITE_OK ){
+ for(ii=0; ii<pTerm->n; ii++){
+ if( pTerm->p[ii]==0x00 ){
+ pBuf->p[pBuf->n++] = '\\';
+ pBuf->p[pBuf->n++] = '0';
+ }else{
+ pBuf->p[pBuf->n++] = pTerm->p[ii];
+ }
+ }
+ pBuf->p[pBuf->n] = 0x00;
+ }
+}
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
+
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The implementation of user-defined scalar function fts5_decode().
*/
@@ -239539,6 +247045,7 @@ static void fts5DecodeFunction(
){
i64 iRowid; /* Rowid for record being decoded */
int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */
+ int bTomb;
const u8 *aBlob; int n; /* Record to decode */
u8 *a = 0;
Fts5Buffer s; /* Build up text to return here */
@@ -239561,7 +247068,7 @@ static void fts5DecodeFunction(
if( a==0 ) goto decode_out;
if( n>0 ) memcpy(a, aBlob, n);
- fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno);
+ fts5DecodeRowid(iRowid, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno);
fts5DebugRowid(&rc, &s, iRowid);
if( bDlidx ){
@@ -239580,6 +247087,28 @@ static void fts5DecodeFunction(
" %d(%lld)", lvl.iLeafPgno, lvl.iRowid
);
}
+ }else if( bTomb ){
+ u32 nElem = fts5GetU32(&a[4]);
+ int szKey = (aBlob[0]==4 || aBlob[0]==8) ? aBlob[0] : 8;
+ int nSlot = (n - 8) / szKey;
+ int ii;
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " nElem=%d", (int)nElem);
+ if( aBlob[1] ){
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " 0");
+ }
+ for(ii=0; ii<nSlot; ii++){
+ u64 iVal = 0;
+ if( szKey==4 ){
+ u32 *aSlot = (u32*)&aBlob[8];
+ if( aSlot[ii] ) iVal = fts5GetU32((u8*)&aSlot[ii]);
+ }else{
+ u64 *aSlot = (u64*)&aBlob[8];
+ if( aSlot[ii] ) iVal = fts5GetU64((u8*)&aSlot[ii]);
+ }
+ if( iVal!=0 ){
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " %lld", (i64)iVal);
+ }
+ }
}else if( iSegid==0 ){
if( iRowid==FTS5_AVERAGES_ROWID ){
fts5DecodeAverages(&rc, &s, a, n);
@@ -239605,16 +247134,15 @@ static void fts5DecodeFunction(
fts5DecodeRowidList(&rc, &s, &a[4], iTermOff-4);
iOff = iTermOff;
- while( iOff<szLeaf ){
+ while( iOff<szLeaf && rc==SQLITE_OK ){
int nAppend;
/* Read the term data for the next term*/
iOff += fts5GetVarint32(&a[iOff], nAppend);
term.n = nKeep;
fts5BufferAppendBlob(&rc, &term, nAppend, &a[iOff]);
- sqlite3Fts5BufferAppendPrintf(
- &rc, &s, " term=%.*s", term.n, (const char*)term.p
- );
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " term=");
+ fts5BufferAppendTerm(&rc, &s, &term);
iOff += nAppend;
/* Figure out where the doclist for this term ends */
@@ -239625,8 +247153,11 @@ static void fts5DecodeFunction(
}else{
iTermOff = szLeaf;
}
-
- fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff);
+ if( iTermOff>szLeaf ){
+ rc = FTS5_CORRUPT;
+ }else{
+ fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff);
+ }
iOff = iTermOff;
if( iOff<szLeaf ){
iOff += fts5GetVarint32(&a[iOff], nKeep);
@@ -239719,9 +247250,8 @@ static void fts5DecodeFunction(
fts5BufferAppendBlob(&rc, &term, nByte, &a[iOff]);
iOff += nByte;
- sqlite3Fts5BufferAppendPrintf(
- &rc, &s, " term=%.*s", term.n, (const char*)term.p
- );
+ sqlite3Fts5BufferAppendPrintf(&rc, &s, " term=");
+ fts5BufferAppendTerm(&rc, &s, &term);
iOff += fts5DecodeDoclist(&rc, &s, &a[iOff], iEnd-iOff);
}
@@ -239737,9 +247267,9 @@ static void fts5DecodeFunction(
}
fts5BufferFree(&s);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
/*
** The implementation of user-defined scalar function fts5_rowid().
*/
@@ -239773,7 +247303,235 @@ static void fts5RowidFunction(
}
}
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
+
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
+
+typedef struct Fts5StructVtab Fts5StructVtab;
+struct Fts5StructVtab {
+ sqlite3_vtab base;
+};
+
+typedef struct Fts5StructVcsr Fts5StructVcsr;
+struct Fts5StructVcsr {
+ sqlite3_vtab_cursor base;
+ Fts5Structure *pStruct;
+ int iLevel;
+ int iSeg;
+ int iRowid;
+};
+
+/*
+** Create a new fts5_structure() table-valued function.
+*/
+static int fts5structConnectMethod(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ Fts5StructVtab *pNew = 0;
+ int rc = SQLITE_OK;
+
+ rc = sqlite3_declare_vtab(db,
+ "CREATE TABLE xyz("
+ "level, segment, merge, segid, leaf1, leaf2, loc1, loc2, "
+ "npgtombstone, nentrytombstone, nentry, struct HIDDEN);"
+ );
+ if( rc==SQLITE_OK ){
+ pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew));
+ }
+
+ *ppVtab = (sqlite3_vtab*)pNew;
+ return rc;
+}
+
+/*
+** We must have a single struct=? constraint that will be passed through
+** into the xFilter method. If there is no valid stmt=? constraint,
+** then return an SQLITE_CONSTRAINT error.
+*/
+static int fts5structBestIndexMethod(
+ sqlite3_vtab *tab,
+ sqlite3_index_info *pIdxInfo
+){
+ int i;
+ int rc = SQLITE_CONSTRAINT;
+ struct sqlite3_index_constraint *p;
+ pIdxInfo->estimatedCost = (double)100;
+ pIdxInfo->estimatedRows = 100;
+ pIdxInfo->idxNum = 0;
+ for(i=0, p=pIdxInfo->aConstraint; i<pIdxInfo->nConstraint; i++, p++){
+ if( p->usable==0 ) continue;
+ if( p->op==SQLITE_INDEX_CONSTRAINT_EQ && p->iColumn==11 ){
+ rc = SQLITE_OK;
+ pIdxInfo->aConstraintUsage[i].omit = 1;
+ pIdxInfo->aConstraintUsage[i].argvIndex = 1;
+ break;
+ }
+ }
+ return rc;
+}
+
+/*
+** This method is the destructor for bytecodevtab objects.
+*/
+static int fts5structDisconnectMethod(sqlite3_vtab *pVtab){
+ Fts5StructVtab *p = (Fts5StructVtab*)pVtab;
+ sqlite3_free(p);
+ return SQLITE_OK;
+}
+
+/*
+** Constructor for a new bytecodevtab_cursor object.
+*/
+static int fts5structOpenMethod(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCsr){
+ int rc = SQLITE_OK;
+ Fts5StructVcsr *pNew = 0;
+
+ pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew));
+ *ppCsr = (sqlite3_vtab_cursor*)pNew;
+
+ return SQLITE_OK;
+}
+
+/*
+** Destructor for a bytecodevtab_cursor.
+*/
+static int fts5structCloseMethod(sqlite3_vtab_cursor *cur){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ fts5StructureRelease(pCsr->pStruct);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+
+/*
+** Advance a bytecodevtab_cursor to its next row of output.
+*/
+static int fts5structNextMethod(sqlite3_vtab_cursor *cur){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ Fts5Structure *p = pCsr->pStruct;
+
+ assert( pCsr->pStruct );
+ pCsr->iSeg++;
+ pCsr->iRowid++;
+ while( pCsr->iLevel<p->nLevel && pCsr->iSeg>=p->aLevel[pCsr->iLevel].nSeg ){
+ pCsr->iLevel++;
+ pCsr->iSeg = 0;
+ }
+ if( pCsr->iLevel>=p->nLevel ){
+ fts5StructureRelease(pCsr->pStruct);
+ pCsr->pStruct = 0;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the cursor has been moved off of the last
+** row of output.
+*/
+static int fts5structEofMethod(sqlite3_vtab_cursor *cur){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ return pCsr->pStruct==0;
+}
+
+static int fts5structRowidMethod(
+ sqlite3_vtab_cursor *cur,
+ sqlite_int64 *piRowid
+){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ *piRowid = pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Return values of columns for the row at which the bytecodevtab_cursor
+** is currently pointing.
+*/
+static int fts5structColumnMethod(
+ sqlite3_vtab_cursor *cur, /* The cursor */
+ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
+ int i /* Which column to return */
+){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur;
+ Fts5Structure *p = pCsr->pStruct;
+ Fts5StructureSegment *pSeg = &p->aLevel[pCsr->iLevel].aSeg[pCsr->iSeg];
+
+ switch( i ){
+ case 0: /* level */
+ sqlite3_result_int(ctx, pCsr->iLevel);
+ break;
+ case 1: /* segment */
+ sqlite3_result_int(ctx, pCsr->iSeg);
+ break;
+ case 2: /* merge */
+ sqlite3_result_int(ctx, pCsr->iSeg < p->aLevel[pCsr->iLevel].nMerge);
+ break;
+ case 3: /* segid */
+ sqlite3_result_int(ctx, pSeg->iSegid);
+ break;
+ case 4: /* leaf1 */
+ sqlite3_result_int(ctx, pSeg->pgnoFirst);
+ break;
+ case 5: /* leaf2 */
+ sqlite3_result_int(ctx, pSeg->pgnoLast);
+ break;
+ case 6: /* origin1 */
+ sqlite3_result_int64(ctx, pSeg->iOrigin1);
+ break;
+ case 7: /* origin2 */
+ sqlite3_result_int64(ctx, pSeg->iOrigin2);
+ break;
+ case 8: /* npgtombstone */
+ sqlite3_result_int(ctx, pSeg->nPgTombstone);
+ break;
+ case 9: /* nentrytombstone */
+ sqlite3_result_int64(ctx, pSeg->nEntryTombstone);
+ break;
+ case 10: /* nentry */
+ sqlite3_result_int64(ctx, pSeg->nEntry);
+ break;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Initialize a cursor.
+**
+** idxNum==0 means show all subprograms
+** idxNum==1 means show only the main bytecode and omit subprograms.
+*/
+static int fts5structFilterMethod(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ Fts5StructVcsr *pCsr = (Fts5StructVcsr *)pVtabCursor;
+ int rc = SQLITE_OK;
+
+ const u8 *aBlob = 0;
+ int nBlob = 0;
+
+ assert( argc==1 );
+ fts5StructureRelease(pCsr->pStruct);
+ pCsr->pStruct = 0;
+
+ nBlob = sqlite3_value_bytes(argv[0]);
+ aBlob = (const u8*)sqlite3_value_blob(argv[0]);
+ rc = fts5StructureDecode(aBlob, nBlob, 0, &pCsr->pStruct);
+ if( rc==SQLITE_OK ){
+ pCsr->iLevel = 0;
+ pCsr->iRowid = 0;
+ pCsr->iSeg = -1;
+ rc = fts5structNextMethod(pVtabCursor);
+ }
+
+ return rc;
+}
+
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
/*
** This is called as part of registering the FTS5 module with database
@@ -239784,7 +247542,7 @@ static void fts5RowidFunction(
** SQLite error code is returned instead.
*/
static int sqlite3Fts5IndexInit(sqlite3 *db){
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
int rc = sqlite3_create_function(
db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0
);
@@ -239801,6 +247559,37 @@ static int sqlite3Fts5IndexInit(sqlite3 *db){
db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0
);
}
+
+ if( rc==SQLITE_OK ){
+ static const sqlite3_module fts5structure_module = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ fts5structConnectMethod, /* xConnect */
+ fts5structBestIndexMethod, /* xBestIndex */
+ fts5structDisconnectMethod, /* xDisconnect */
+ 0, /* xDestroy */
+ fts5structOpenMethod, /* xOpen */
+ fts5structCloseMethod, /* xClose */
+ fts5structFilterMethod, /* xFilter */
+ fts5structNextMethod, /* xNext */
+ fts5structEofMethod, /* xEof */
+ fts5structColumnMethod, /* xColumn */
+ fts5structRowidMethod, /* xRowid */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindFunction */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0, /* xRollbackTo */
+ 0, /* xShadowName */
+ 0 /* xIntegrity */
+ };
+ rc = sqlite3_create_module(db, "fts5_structure", &fts5structure_module, 0);
+ }
return rc;
#else
return SQLITE_OK;
@@ -239936,6 +247725,8 @@ struct Fts5FullTable {
Fts5Storage *pStorage; /* Document store */
Fts5Global *pGlobal; /* Global (connection wide) data */
Fts5Cursor *pSortCsr; /* Sort data from this cursor */
+ int iSavepoint; /* Successful xSavepoint()+1 */
+
#ifdef SQLITE_DEBUG
struct Fts5TransactionState ts;
#endif
@@ -240224,6 +248015,13 @@ static int fts5InitVtab(
pConfig->pzErrmsg = 0;
}
+ if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, (int)1);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
+ }
+
if( rc!=SQLITE_OK ){
fts5FreeVtab(pTab);
pTab = 0;
@@ -240466,12 +248264,15 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
}
idxStr[iIdxStr] = '\0';
- /* Set idxFlags flags for the ORDER BY clause */
+ /* Set idxFlags flags for the ORDER BY clause
+ **
+ ** Note that tokendata=1 tables cannot currently handle "ORDER BY rowid DESC".
+ */
if( pInfo->nOrderBy==1 ){
int iSort = pInfo->aOrderBy[0].iColumn;
if( iSort==(pConfig->nCol+1) && bSeenMatch ){
idxFlags |= FTS5_BI_ORDER_RANK;
- }else if( iSort==-1 ){
+ }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){
idxFlags |= FTS5_BI_ORDER_ROWID;
}
if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){
@@ -240723,6 +248524,16 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){
);
assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) );
+ /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table,
+ ** clear any token mappings accumulated at the fts5_index.c level. In
+ ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH,
+ ** we need to retain the mappings for the entire query. */
+ if( pCsr->ePlan==FTS5_PLAN_MATCH
+ && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata
+ ){
+ sqlite3Fts5ExprClearTokens(pCsr->pExpr);
+ }
+
if( pCsr->ePlan<3 ){
int bSkip = 0;
if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc;
@@ -241148,6 +248959,9 @@ static int fts5FilterMethod(
pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64);
}
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ if( rc!=SQLITE_OK ) goto filter_out;
+
if( pTab->pSortCsr ){
/* If pSortCsr is non-NULL, then this call is being made as part of
** processing for a "... MATCH <expr> ORDER BY rank" query (ePlan is
@@ -241170,6 +248984,7 @@ static int fts5FilterMethod(
pCsr->pExpr = pTab->pSortCsr->pExpr;
rc = fts5CursorFirst(pTab, pCsr, bDesc);
}else if( pCsr->pExpr ){
+ assert( rc==SQLITE_OK );
rc = fts5CursorParseRank(pConfig, pCsr, pRank);
if( rc==SQLITE_OK ){
if( bOrderByRank ){
@@ -241341,6 +249156,7 @@ static int fts5SpecialInsert(
Fts5Config *pConfig = pTab->p.pConfig;
int rc = SQLITE_OK;
int bError = 0;
+ int bLoadConfig = 0;
if( 0==sqlite3_stricmp("delete-all", zCmd) ){
if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
@@ -241352,6 +249168,7 @@ static int fts5SpecialInsert(
}else{
rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage);
}
+ bLoadConfig = 1;
}else if( 0==sqlite3_stricmp("rebuild", zCmd) ){
if( pConfig->eContent==FTS5_CONTENT_NONE ){
fts5SetVtabError(pTab,
@@ -241361,6 +249178,7 @@ static int fts5SpecialInsert(
}else{
rc = sqlite3Fts5StorageRebuild(pTab->pStorage);
}
+ bLoadConfig = 1;
}else if( 0==sqlite3_stricmp("optimize", zCmd) ){
rc = sqlite3Fts5StorageOptimize(pTab->pStorage);
}else if( 0==sqlite3_stricmp("merge", zCmd) ){
@@ -241373,8 +249191,13 @@ static int fts5SpecialInsert(
}else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){
pConfig->bPrefixIndex = sqlite3_value_int(pVal);
#endif
+ }else if( 0==sqlite3_stricmp("flush", zCmd) ){
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
}else{
- rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ }
if( rc==SQLITE_OK ){
rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError);
}
@@ -241386,6 +249209,12 @@ static int fts5SpecialInsert(
}
}
}
+
+ if( rc==SQLITE_OK && bLoadConfig ){
+ pTab->p.pConfig->iCookie--;
+ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex);
+ }
+
return rc;
}
@@ -241444,7 +249273,6 @@ static int fts5UpdateMethod(
int rc = SQLITE_OK; /* Return code */
int bUpdateOrDelete = 0;
-
/* A transaction must be open when this is called. */
assert( pTab->ts.eState==1 || pTab->ts.eState==2 );
@@ -241473,7 +249301,14 @@ static int fts5UpdateMethod(
if( pConfig->eContent!=FTS5_CONTENT_NORMAL
&& 0==sqlite3_stricmp("delete", z)
){
- rc = fts5SpecialDelete(pTab, apVal);
+ if( pConfig->bContentlessDelete ){
+ fts5SetVtabError(pTab,
+ "'delete' may not be used with a contentless_delete=1 table"
+ );
+ rc = SQLITE_ERROR;
+ }else{
+ rc = fts5SpecialDelete(pTab, apVal);
+ }
}else{
rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]);
}
@@ -241490,7 +249325,7 @@ static int fts5UpdateMethod(
** Cases 3 and 4 may violate the rowid constraint.
*/
int eConflict = SQLITE_ABORT;
- if( pConfig->eContent==FTS5_CONTENT_NORMAL ){
+ if( pConfig->eContent==FTS5_CONTENT_NORMAL || pConfig->bContentlessDelete ){
eConflict = sqlite3_vtab_on_conflict(pConfig->db);
}
@@ -241498,8 +249333,12 @@ static int fts5UpdateMethod(
assert( nArg!=1 || eType0==SQLITE_INTEGER );
/* Filter out attempts to run UPDATE or DELETE on contentless tables.
- ** This is not suported. */
- if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){
+ ** This is not suported. Except - they are both supported if the CREATE
+ ** VIRTUAL TABLE statement contained "contentless_delete=1". */
+ if( eType0==SQLITE_INTEGER
+ && pConfig->eContent==FTS5_CONTENT_NONE
+ && pConfig->bContentlessDelete==0
+ ){
pTab->p.base.zErrMsg = sqlite3_mprintf(
"cannot %s contentless fts5 table: %s",
(nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
@@ -241523,7 +249362,8 @@ static int fts5UpdateMethod(
}
else if( eType0!=SQLITE_INTEGER ){
- /* If this is a REPLACE, first remove the current entry (if any) */
+ /* An INSERT statement. If the conflict-mode is REPLACE, first remove
+ ** the current entry (if any). */
if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){
i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */
rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
@@ -241586,8 +249426,7 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
fts5CheckTransactionState(pTab, FTS5_SYNC, 0);
pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
- fts5TripCursors(pTab);
- rc = sqlite3Fts5StorageSync(pTab->pStorage);
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
pTab->p.pConfig->pzErrmsg = 0;
return rc;
}
@@ -241683,7 +249522,10 @@ static int fts5ApiColumnText(
){
int rc = SQLITE_OK;
Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
- if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
+ Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab);
+ if( iCol<0 || iCol>=pTab->pConfig->nCol ){
+ rc = SQLITE_RANGE;
+ }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab))
|| pCsr->ePlan==FTS5_PLAN_SPECIAL
){
*pz = 0;
@@ -241708,8 +249550,9 @@ static int fts5CsrPoslist(
int rc = SQLITE_OK;
int bLive = (pCsr->pSorter==0);
- if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
-
+ if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){
+ rc = SQLITE_RANGE;
+ }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){
if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
Fts5PoslistPopulator *aPopulator;
int i;
@@ -241733,15 +249576,21 @@ static int fts5CsrPoslist(
CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST);
}
- if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
- Fts5Sorter *pSorter = pCsr->pSorter;
- int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
- *pn = pSorter->aIdx[iPhrase] - i1;
- *pa = &pSorter->aPoslist[i1];
+ if( rc==SQLITE_OK ){
+ if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){
+ Fts5Sorter *pSorter = pCsr->pSorter;
+ int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]);
+ *pn = pSorter->aIdx[iPhrase] - i1;
+ *pa = &pSorter->aPoslist[i1];
+ }else{
+ *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ }
}else{
- *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa);
+ *pa = 0;
+ *pn = 0;
}
+
return rc;
}
@@ -241848,12 +249697,6 @@ static int fts5ApiInst(
){
if( iIdx<0 || iIdx>=pCsr->nInstCount ){
rc = SQLITE_RANGE;
-#if 0
- }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){
- *piPhrase = pCsr->aInst[iIdx*3];
- *piCol = pCsr->aInst[iIdx*3 + 2];
- *piOff = -1;
-#endif
}else{
*piPhrase = pCsr->aInst[iIdx*3];
*piCol = pCsr->aInst[iIdx*3 + 1];
@@ -242108,13 +249951,56 @@ static int fts5ApiPhraseFirstColumn(
return rc;
}
+/*
+** xQueryToken() API implemenetation.
+*/
+static int fts5ApiQueryToken(
+ Fts5Context* pCtx,
+ int iPhrase,
+ int iToken,
+ const char **ppOut,
+ int *pnOut
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut);
+}
+
+/*
+** xInstToken() API implemenetation.
+*/
+static int fts5ApiInstToken(
+ Fts5Context *pCtx,
+ int iIdx,
+ int iToken,
+ const char **ppOut, int *pnOut
+){
+ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx;
+ int rc = SQLITE_OK;
+ if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0
+ || SQLITE_OK==(rc = fts5CacheInstArray(pCsr))
+ ){
+ if( iIdx<0 || iIdx>=pCsr->nInstCount ){
+ rc = SQLITE_RANGE;
+ }else{
+ int iPhrase = pCsr->aInst[iIdx*3];
+ int iCol = pCsr->aInst[iIdx*3 + 1];
+ int iOff = pCsr->aInst[iIdx*3 + 2];
+ i64 iRowid = fts5CursorRowid(pCsr);
+ rc = sqlite3Fts5ExprInstToken(
+ pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut
+ );
+ }
+ }
+ return rc;
+}
+
static int fts5ApiQueryPhrase(Fts5Context*, int, void*,
int(*)(const Fts5ExtensionApi*, Fts5Context*, void*)
);
static const Fts5ExtensionApi sFts5Api = {
- 2, /* iVersion */
+ 3, /* iVersion */
fts5ApiUserData,
fts5ApiColumnCount,
fts5ApiRowCount,
@@ -242134,6 +250020,8 @@ static const Fts5ExtensionApi sFts5Api = {
fts5ApiPhraseNext,
fts5ApiPhraseFirstColumn,
fts5ApiPhraseNextColumn,
+ fts5ApiQueryToken,
+ fts5ApiInstToken
};
/*
@@ -242354,6 +250242,12 @@ static int fts5ColumnMethod(
sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1));
}
pConfig->pzErrmsg = 0;
+ }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){
+ char *zErr = sqlite3_mprintf("cannot UPDATE a subset of "
+ "columns on fts5 contentless-delete table: %s", pConfig->zName
+ );
+ sqlite3_result_error(pCtx, zErr, -1);
+ sqlite3_free(zErr);
}
return rc;
}
@@ -242392,8 +250286,10 @@ static int fts5RenameMethod(
sqlite3_vtab *pVtab, /* Virtual table handle */
const char *zName /* New name of table */
){
+ int rc;
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
- return sqlite3Fts5StorageRename(pTab->pStorage, zName);
+ rc = sqlite3Fts5StorageRename(pTab->pStorage, zName);
+ return rc;
}
static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){
@@ -242407,9 +250303,15 @@ static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){
** Flush the contents of the pending-terms table to disk.
*/
static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
- UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
- fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_SAVEPOINT, iSavepoint);
- return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ int rc = SQLITE_OK;
+
+ fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint);
+ rc = sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+ if( rc==SQLITE_OK ){
+ pTab->iSavepoint = iSavepoint+1;
+ }
+ return rc;
}
/*
@@ -242418,9 +250320,16 @@ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
** This is a no-op.
*/
static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
- UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
- fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_RELEASE, iSavepoint);
- return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab);
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ int rc = SQLITE_OK;
+ fts5CheckTransactionState(pTab, FTS5_RELEASE, iSavepoint);
+ if( (iSavepoint+1)<pTab->iSavepoint ){
+ rc = sqlite3Fts5FlushToDisk(&pTab->p);
+ if( rc==SQLITE_OK ){
+ pTab->iSavepoint = iSavepoint;
+ }
+ }
+ return rc;
}
/*
@@ -242430,11 +250339,14 @@ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){
*/
static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
- UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */
+ int rc = SQLITE_OK;
fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint);
fts5TripCursors(pTab);
- pTab->p.pConfig->pgsz = 0;
- return sqlite3Fts5StorageRollback(pTab->pStorage);
+ if( (iSavepoint+1)<=pTab->iSavepoint ){
+ pTab->p.pConfig->pgsz = 0;
+ rc = sqlite3Fts5StorageRollback(pTab->pStorage);
+ }
+ return rc;
}
/*
@@ -242636,7 +250548,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a", -1, SQLITE_TRANSIENT);
}
/*
@@ -242654,9 +250566,40 @@ static int fts5ShadowName(const char *zName){
return 0;
}
+/*
+** Run an integrity check on the FTS5 data structures. Return a string
+** if anything is found amiss. Return a NULL pointer if everything is
+** OK.
+*/
+static int fts5IntegrityMethod(
+ sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */
+ const char *zSchema, /* Name of schema in which this table lives */
+ const char *zTabname, /* Name of the table itself */
+ int isQuick, /* True if this is a quick-check */
+ char **pzErr /* Write error message here */
+){
+ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
+ int rc;
+
+ assert( pzErr!=0 && *pzErr==0 );
+ UNUSED_PARAM(isQuick);
+ rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0);
+ if( (rc&0xff)==SQLITE_CORRUPT ){
+ *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s",
+ zSchema, zTabname);
+ }else if( rc!=SQLITE_OK ){
+ *pzErr = sqlite3_mprintf("unable to validate the inverted index for"
+ " FTS5 table %s.%s: %s",
+ zSchema, zTabname, sqlite3_errstr(rc));
+ }
+ sqlite3Fts5IndexCloseReader(pTab->p.pIndex);
+
+ return SQLITE_OK;
+}
+
static int fts5Init(sqlite3 *db){
static const sqlite3_module fts5Mod = {
- /* iVersion */ 3,
+ /* iVersion */ 4,
/* xCreate */ fts5CreateMethod,
/* xConnect */ fts5ConnectMethod,
/* xBestIndex */ fts5BestIndexMethod,
@@ -242679,7 +250622,8 @@ static int fts5Init(sqlite3 *db){
/* xSavepoint */ fts5SavepointMethod,
/* xRelease */ fts5ReleaseMethod,
/* xRollbackTo */ fts5RollbackToMethod,
- /* xShadowName */ fts5ShadowName
+ /* xShadowName */ fts5ShadowName,
+ /* xIntegrity */ fts5IntegrityMethod
};
int rc;
@@ -242849,10 +250793,10 @@ static int fts5StorageGetStmt(
"INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */
"REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */
"DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */
- "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */
+ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?%s)", /* REPLACE_DOCSIZE */
"DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */
- "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */
+ "SELECT sz%s FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */
"REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */
"SELECT %s FROM %s AS T", /* SCAN */
@@ -242900,6 +250844,19 @@ static int fts5StorageGetStmt(
break;
}
+ case FTS5_STMT_REPLACE_DOCSIZE:
+ zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName,
+ (pC->bContentlessDelete ? ",?" : "")
+ );
+ break;
+
+ case FTS5_STMT_LOOKUP_DOCSIZE:
+ zSql = sqlite3_mprintf(azStmt[eStmt],
+ (pC->bContentlessDelete ? ",origin" : ""),
+ pC->zDb, pC->zName
+ );
+ break;
+
default:
zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName);
break;
@@ -243089,9 +251046,11 @@ static int sqlite3Fts5StorageOpen(
}
if( rc==SQLITE_OK && pConfig->bColumnsize ){
- rc = sqlite3Fts5CreateTable(
- pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr
- );
+ const char *zCols = "id INTEGER PRIMARY KEY, sz BLOB";
+ if( pConfig->bContentlessDelete ){
+ zCols = "id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER";
+ }
+ rc = sqlite3Fts5CreateTable(pConfig, "docsize", zCols, 0, pzErr);
}
if( rc==SQLITE_OK ){
rc = sqlite3Fts5CreateTable(
@@ -243168,7 +251127,7 @@ static int fts5StorageDeleteFromIndex(
){
Fts5Config *pConfig = p->pConfig;
sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */
- int rc; /* Return code */
+ int rc = SQLITE_OK; /* Return code */
int rc2; /* sqlite3_reset() return code */
int iCol;
Fts5InsertCtx ctx;
@@ -243184,7 +251143,6 @@ static int fts5StorageDeleteFromIndex(
ctx.pStorage = p;
ctx.iCol = -1;
- rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){
if( pConfig->abUnindexed[iCol-1]==0 ){
const char *zText;
@@ -243221,6 +251179,37 @@ static int fts5StorageDeleteFromIndex(
return rc;
}
+/*
+** This function is called to process a DELETE on a contentless_delete=1
+** table. It adds the tombstone required to delete the entry with rowid
+** iDel. If successful, SQLITE_OK is returned. Or, if an error occurs,
+** an SQLite error code.
+*/
+static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){
+ i64 iOrigin = 0;
+ sqlite3_stmt *pLookup = 0;
+ int rc = SQLITE_OK;
+
+ assert( p->pConfig->bContentlessDelete );
+ assert( p->pConfig->eContent==FTS5_CONTENT_NONE );
+
+ /* Look up the origin of the document in the %_docsize table. Store
+ ** this in stack variable iOrigin. */
+ rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_int64(pLookup, 1, iDel);
+ if( SQLITE_ROW==sqlite3_step(pLookup) ){
+ iOrigin = sqlite3_column_int64(pLookup, 1);
+ }
+ rc = sqlite3_reset(pLookup);
+ }
+
+ if( rc==SQLITE_OK && iOrigin!=0 ){
+ rc = sqlite3Fts5IndexContentlessDelete(p->pIndex, iOrigin, iDel);
+ }
+
+ return rc;
+}
/*
** Insert a record into the %_docsize table. Specifically, do:
@@ -243241,10 +251230,17 @@ static int fts5StorageInsertDocsize(
rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0);
if( rc==SQLITE_OK ){
sqlite3_bind_int64(pReplace, 1, iRowid);
- sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC);
- sqlite3_step(pReplace);
- rc = sqlite3_reset(pReplace);
- sqlite3_bind_null(pReplace, 2);
+ if( p->pConfig->bContentlessDelete ){
+ i64 iOrigin = 0;
+ rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin);
+ sqlite3_bind_int64(pReplace, 3, iOrigin);
+ }
+ if( rc==SQLITE_OK ){
+ sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC);
+ sqlite3_step(pReplace);
+ rc = sqlite3_reset(pReplace);
+ sqlite3_bind_null(pReplace, 2);
+ }
}
}
return rc;
@@ -243308,7 +251304,15 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap
/* Delete the index records */
if( rc==SQLITE_OK ){
- rc = fts5StorageDeleteFromIndex(p, iDel, apVal);
+ rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel);
+ }
+
+ if( rc==SQLITE_OK ){
+ if( p->pConfig->bContentlessDelete ){
+ rc = fts5StorageContentlessDelete(p, iDel);
+ }else{
+ rc = fts5StorageDeleteFromIndex(p, iDel, apVal);
+ }
}
/* Delete the %_docsize record */
@@ -243385,7 +251389,7 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){
}
if( rc==SQLITE_OK ){
- rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0);
+ rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg);
}
while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){
@@ -243896,7 +251900,9 @@ static int sqlite3Fts5StorageSync(Fts5Storage *p){
i64 iLastRowid = sqlite3_last_insert_rowid(p->pConfig->db);
if( p->bTotalsValid ){
rc = fts5StorageSaveTotals(p);
- p->bTotalsValid = 0;
+ if( rc==SQLITE_OK ){
+ p->bTotalsValid = 0;
+ }
}
if( rc==SQLITE_OK ){
rc = sqlite3Fts5IndexSync(p->pIndex);
@@ -244170,6 +252176,12 @@ static const unsigned char sqlite3Utf8Trans1[] = {
#endif /* ifndef SQLITE_AMALGAMATION */
+#define FTS5_SKIP_UTF8(zIn) { \
+ if( ((unsigned char)(*(zIn++)))>=0xc0 ){ \
+ while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \
+ } \
+}
+
typedef struct Unicode61Tokenizer Unicode61Tokenizer;
struct Unicode61Tokenizer {
unsigned char aTokenChar[128]; /* ASCII range token characters */
@@ -245205,6 +253217,7 @@ static int fts5PorterTokenize(
typedef struct TrigramTokenizer TrigramTokenizer;
struct TrigramTokenizer {
int bFold; /* True to fold to lower-case */
+ int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */
};
/*
@@ -245231,6 +253244,7 @@ static int fts5TriCreate(
}else{
int i;
pNew->bFold = 1;
+ pNew->iFoldParam = 0;
for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
const char *zArg = azArg[i+1];
if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
@@ -245239,10 +253253,21 @@ static int fts5TriCreate(
}else{
pNew->bFold = (zArg[0]=='0');
}
+ }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
+ if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){
+ rc = SQLITE_ERROR;
+ }else{
+ pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0;
+ }
}else{
rc = SQLITE_ERROR;
}
}
+
+ if( pNew->iFoldParam!=0 && pNew->bFold==0 ){
+ rc = SQLITE_ERROR;
+ }
+
if( rc!=SQLITE_OK ){
fts5TriDelete((Fts5Tokenizer*)pNew);
pNew = 0;
@@ -245265,40 +253290,62 @@ static int fts5TriTokenize(
TrigramTokenizer *p = (TrigramTokenizer*)pTok;
int rc = SQLITE_OK;
char aBuf[32];
+ char *zOut = aBuf;
+ int ii;
const unsigned char *zIn = (const unsigned char*)pText;
const unsigned char *zEof = &zIn[nText];
u32 iCode;
+ int aStart[3]; /* Input offset of each character in aBuf[] */
UNUSED_PARAM(unusedFlags);
- while( 1 ){
- char *zOut = aBuf;
- int iStart = zIn - (const unsigned char*)pText;
- const unsigned char *zNext;
-
- READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- zNext = zIn;
- if( zIn<zEof ){
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
+
+ /* Populate aBuf[] with the characters for the first trigram. */
+ for(ii=0; ii<3; ii++){
+ do {
+ aStart[ii] = zIn - (const unsigned char*)pText;
READ_UTF8(zIn, zEof, iCode);
- if( iCode==0 ) break;
- }else{
- break;
- }
- if( zIn<zEof ){
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
+ if( iCode==0 ) return SQLITE_OK;
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam);
+ }while( iCode==0 );
+ WRITE_UTF8(zOut, iCode);
+ }
+
+ /* At the start of each iteration of this loop:
+ **
+ ** aBuf: Contains 3 characters. The 3 characters of the next trigram.
+ ** zOut: Points to the byte following the last character in aBuf.
+ ** aStart[3]: Contains the byte offset in the input text corresponding
+ ** to the start of each of the three characters in the buffer.
+ */
+ assert( zIn<=zEof );
+ while( 1 ){
+ int iNext; /* Start of character following current tri */
+ const char *z1;
+
+ /* Read characters from the input up until the first non-diacritic */
+ do {
+ iNext = zIn - (const unsigned char*)pText;
READ_UTF8(zIn, zEof, iCode);
if( iCode==0 ) break;
- if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
- WRITE_UTF8(zOut, iCode);
- }else{
- break;
- }
- rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf);
- if( rc!=SQLITE_OK ) break;
- zIn = zNext;
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam);
+ }while( iCode==0 );
+
+ /* Pass the current trigram back to fts5 */
+ rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext);
+ if( iCode==0 || rc!=SQLITE_OK ) break;
+
+ /* Remove the first character from buffer aBuf[]. Append the character
+ ** with codepoint iCode. */
+ z1 = aBuf;
+ FTS5_SKIP_UTF8(z1);
+ memmove(aBuf, z1, zOut - z1);
+ zOut -= (z1 - aBuf);
+ WRITE_UTF8(zOut, iCode);
+
+ /* Update the aStart[] array */
+ aStart[0] = aStart[1];
+ aStart[1] = aStart[2];
+ aStart[2] = iNext;
}
return rc;
@@ -245321,7 +253368,9 @@ static int sqlite3Fts5TokenizerPattern(
){
if( xCreate==fts5TriCreate ){
TrigramTokenizer *p = (TrigramTokenizer*)pTok;
- return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
+ if( p->iFoldParam==0 ){
+ return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
+ }
}
return FTS5_PATTERN_NONE;
}
@@ -247110,7 +255159,7 @@ static int fts5VocabFilterMethod(
if( pEq ){
zTerm = (const char *)sqlite3_value_text(pEq);
nTerm = sqlite3_value_bytes(pEq);
- f = 0;
+ f = FTS5INDEX_QUERY_NOTOKENDATA;
}else{
if( pGe ){
zTerm = (const char *)sqlite3_value_text(pGe);
@@ -247264,7 +255313,8 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
/* xSavepoint */ 0,
/* xRelease */ 0,
/* xRollbackTo */ 0,
- /* xShadowName */ 0
+ /* xShadowName */ 0,
+ /* xIntegrity */ 0
};
void *p = (void*)pGlobal;
@@ -247593,6 +255643,7 @@ static sqlite3_module stmtModule = {
0, /* xRelease */
0, /* xRollbackTo */
0, /* xShadowName */
+ 0 /* xIntegrity */
};
#endif /* SQLITE_OMIT_VIRTUALTABLE */
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.h b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.h
index 57eac0a..a07a519 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3-binding.h
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3-binding.h
@@ -147,9 +147,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.42.0"
-#define SQLITE_VERSION_NUMBER 3042000
-#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0"
+#define SQLITE_VERSION "3.45.1"
+#define SQLITE_VERSION_NUMBER 3045001
+#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -529,6 +529,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8))
+#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
@@ -1191,7 +1192,7 @@ struct sqlite3_io_methods {
** by clients within the current process, only within other processes.
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
-** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use interally by the
+** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
@@ -2127,7 +2128,7 @@ struct sqlite3_mem_methods {
** is stored in each sorted record and the required column values loaded
** from the database as records are returned in sorted order. The default
** value for this option is to never use this optimization. Specifying a
-** negative value for this option restores the default behaviour.
+** negative value for this option restores the default behavior.
** This option is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
**
@@ -2302,7 +2303,7 @@ struct sqlite3_mem_methods {
** database handle, SQLite checks if this will mean that there are now no
** connections at all to the database. If so, it performs a checkpoint
** operation before closing the connection. This option may be used to
-** override this behaviour. The first parameter passed to this operation
+** override this behavior. The first parameter passed to this operation
** is an integer - positive to disable checkpoints-on-close, or zero (the
** default) to enable them, and negative to leave the setting unchanged.
** The second parameter is a pointer to an integer
@@ -2455,7 +2456,7 @@ struct sqlite3_mem_methods {
** the [VACUUM] command will fail with an obscure error when attempting to
** process a table with generated columns and a descending index. This is
** not considered a bug since SQLite versions 3.3.0 and earlier do not support
-** either generated columns or decending indexes.
+** either generated columns or descending indexes.
** </dd>
**
** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]]
@@ -2736,6 +2737,7 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
**
** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
** or not an interrupt is currently in effect for [database connection] D.
+** It returns 1 if an interrupt is currently in effect, or 0 otherwise.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
SQLITE_API int sqlite3_is_interrupted(sqlite3*);
@@ -3389,8 +3391,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** M argument should be the bitwise OR-ed combination of
** zero or more [SQLITE_TRACE] constants.
**
-** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
-** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
+** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P)
+** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or
+** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each
+** database connection may have at most one trace callback.
**
** ^The X callback is invoked whenever any of the events identified by
** mask M occur. ^The integer return value from the callback is currently
@@ -3759,7 +3763,7 @@ SQLITE_API int sqlite3_open_v2(
** as F) must be one of:
** <ul>
** <li> A database filename pointer created by the SQLite core and
-** passed into the xOpen() method of a VFS implemention, or
+** passed into the xOpen() method of a VFS implementation, or
** <li> A filename obtained from [sqlite3_db_filename()], or
** <li> A new filename constructed using [sqlite3_create_filename()].
** </ul>
@@ -3872,7 +3876,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
/*
** CAPI3REF: Create and Destroy VFS Filenames
**
-** These interfces are provided for use by [VFS shim] implementations and
+** These interfaces are provided for use by [VFS shim] implementations and
** are not useful outside of that context.
**
** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
@@ -3951,14 +3955,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename);
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
-** text that describes the error, as either UTF-8 or UTF-16 respectively.
+** text that describes the error, as either UTF-8 or UTF-16 respectively,
+** or NULL if no error message is available.
+** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
** subsequent calls to other SQLite interface functions.)^
**
-** ^The sqlite3_errstr() interface returns the English-language text
-** that describes the [result code], as UTF-8.
+** ^The sqlite3_errstr(E) interface returns the English-language text
+** that describes the [result code] E, as UTF-8, or NULL if E is not an
+** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally
** and must not be freed by the application)^.
**
@@ -4420,6 +4427,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
/*
+** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN
+** setting for [prepared statement] S. If E is zero, then S becomes
+** a normal prepared statement. If E is 1, then S behaves as if
+** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if
+** its SQL text began with "[EXPLAIN QUERY PLAN]".
+**
+** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared.
+** SQLite tries to avoid a reprepare, but a reprepare might be necessary
+** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode.
+**
+** Because of the potential need to reprepare, a call to
+** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be
+** reprepared because it was created using [sqlite3_prepare()] instead of
+** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and
+** hence has no saved SQL text with which to reprepare.
+**
+** Changing the explain setting for a prepared statement does not change
+** the original SQL text for the statement. Hence, if the SQL text originally
+** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0)
+** is called to convert the statement into an ordinary statement, the EXPLAIN
+** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S)
+** output, even though the statement now acts like a normal SQL statement.
+**
+** This routine returns SQLITE_OK if the explain mode is successfully
+** changed, or an error code if the explain mode could not be changed.
+** The explain mode cannot be changed while a statement is active.
+** Hence, it is good practice to call [sqlite3_reset(S)]
+** immediately prior to calling sqlite3_stmt_explain(S,E).
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode);
+
+/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
**
@@ -4582,7 +4624,7 @@ typedef struct sqlite3_context sqlite3_context;
** with it may be passed. ^It is called to dispose of the BLOB or string even
** if the call to the bind API fails, except the destructor is not called if
** the third parameter is a NULL pointer or the fourth parameter is negative.
-** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that
+** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that
** the application remains responsible for disposing of the object. ^In this
** case, the object and the provided pointer to it must remain valid until
** either the prepared statement is finalized or the same SQL parameter is
@@ -5261,20 +5303,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
** back to the beginning of its program.
**
-** ^If the most recent call to [sqlite3_step(S)] for the
-** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
-** or if [sqlite3_step(S)] has never before been called on S,
-** then [sqlite3_reset(S)] returns [SQLITE_OK].
+** ^The return code from [sqlite3_reset(S)] indicates whether or not
+** the previous evaluation of prepared statement S completed successfully.
+** ^If [sqlite3_step(S)] has never before been called on S or if
+** [sqlite3_step(S)] has not been called since the previous call
+** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return
+** [SQLITE_OK].
**
** ^If the most recent call to [sqlite3_step(S)] for the
** [prepared statement] S indicated an error, then
** [sqlite3_reset(S)] returns an appropriate [error code].
+** ^The [sqlite3_reset(S)] interface might also return an [error code]
+** if there were no prior errors but the process of resetting
+** the prepared statement caused a new error. ^For example, if an
+** [INSERT] statement with a [RETURNING] clause is only stepped one time,
+** that one call to [sqlite3_step(S)] might return SQLITE_ROW but
+** the overall statement might still fail and the [sqlite3_reset(S)] call
+** might return SQLITE_BUSY if locking constraints prevent the
+** database change from committing. Therefore, it is important that
+** applications check the return code from [sqlite3_reset(S)] even if
+** no prior call to [sqlite3_step(S)] indicated a problem.
**
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
+
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
@@ -5485,7 +5540,7 @@ SQLITE_API int sqlite3_create_window_function(
** [application-defined SQL function]
** that has side-effects or that could potentially leak sensitive information.
** This will prevent attacks in which an application is tricked
-** into using a database file that has had its schema surreptiously
+** into using a database file that has had its schema surreptitiously
** modified to invoke the application-defined function in ways that are
** harmful.
** <p>
@@ -5521,13 +5576,27 @@ SQLITE_API int sqlite3_create_window_function(
** </dd>
**
** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call
** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
-** Specifying this flag makes no difference for scalar or aggregate user
-** functions. However, if it is not specified for a user-defined window
-** function, then any sub-types belonging to arguments passed to the window
-** function may be discarded before the window function is called (i.e.
-** sqlite3_value_subtype() will always return 0).
+** This flag instructs SQLite to omit some corner-case optimizations that
+** might disrupt the operation of the [sqlite3_value_subtype()] function,
+** causing it to return zero rather than the correct subtype().
+** SQL functions that invokes [sqlite3_value_subtype()] should have this
+** property. If the SQLITE_SUBTYPE property is omitted, then the return
+** value from [sqlite3_value_subtype()] might sometimes be zero even though
+** a non-zero subtype was specified by the function argument expression.
+**
+** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd>
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call
+** [sqlite3_result_subtype()] to cause a sub-type to be associated with its
+** result.
+** Every function that invokes [sqlite3_result_subtype()] should have this
+** property. If it does not, then the call to [sqlite3_result_subtype()]
+** might become a no-op if the function is used as term in an
+** [expression index]. On the other hand, SQL functions that never invoke
+** [sqlite3_result_subtype()] should avoid setting this property, as the
+** purpose of this property is to disable certain optimizations that are
+** incompatible with subtypes.
** </dd>
** </dl>
*/
@@ -5535,6 +5604,7 @@ SQLITE_API int sqlite3_create_window_function(
#define SQLITE_DIRECTONLY 0x000080000
#define SQLITE_SUBTYPE 0x000100000
#define SQLITE_INNOCUOUS 0x000200000
+#define SQLITE_RESULT_SUBTYPE 0x001000000
/*
** CAPI3REF: Deprecated Functions
@@ -5731,6 +5801,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
** information can be used to pass a limited amount of context from
** one SQL function to another. Use the [sqlite3_result_subtype()]
** routine to set the subtype for the return value of an SQL function.
+**
+** Every [application-defined SQL function] that invoke this interface
+** should include the [SQLITE_SUBTYPE] property in the text
+** encoding argument when the function is [sqlite3_create_function|registered].
+** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype()
+** might return zero instead of the upstream subtype in some corner cases.
*/
SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
@@ -5829,48 +5905,56 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
-** associate metadata with argument values. If the same value is passed to
-** multiple invocations of the same SQL function during query execution, under
-** some circumstances the associated metadata may be preserved. An example
-** of where this might be useful is in a regular-expression matching
-** function. The compiled version of the regular expression can be stored as
-** metadata associated with the pattern string.
+** associate auxiliary data with argument values. If the same argument
+** value is passed to multiple invocations of the same SQL function during
+** query execution, under some circumstances the associated auxiliary data
+** might be preserved. An example of where this might be useful is in a
+** regular-expression matching function. The compiled version of the regular
+** expression can be stored as auxiliary data associated with the pattern string.
** Then as long as the pattern string remains the same,
** the compiled regular expression can be reused on multiple
** invocations of the same function.
**
-** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
+** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data
** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
** value to the application-defined function. ^N is zero for the left-most
-** function argument. ^If there is no metadata
+** function argument. ^If there is no auxiliary data
** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
** returns a NULL pointer.
**
-** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
-** argument of the application-defined function. ^Subsequent
+** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the
+** N-th argument of the application-defined function. ^Subsequent
** calls to sqlite3_get_auxdata(C,N) return P from the most recent
-** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
-** NULL if the metadata has been discarded.
+** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or
+** NULL if the auxiliary data has been discarded.
** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
** SQLite will invoke the destructor function X with parameter P exactly
-** once, when the metadata is discarded.
-** SQLite is free to discard the metadata at any time, including: <ul>
+** once, when the auxiliary data is discarded.
+** SQLite is free to discard the auxiliary data at any time, including: <ul>
** <li> ^(when the corresponding function parameter changes)^, or
** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
** SQL statement)^, or
** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
** parameter)^, or
** <li> ^(during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs.)^ </ul>
+** allocation error occurs.)^
+** <li> ^(during the original sqlite3_set_auxdata() call if the function
+** is evaluated during query planning instead of during query execution,
+** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul>
**
-** Note the last bullet in particular. The destructor X in
+** Note the last two bullets in particular. The destructor X in
** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
** should be called near the end of the function implementation and the
** function implementation should not make any use of P after
-** sqlite3_set_auxdata() has been called.
-**
-** ^(In practice, metadata is preserved between function calls for
+** sqlite3_set_auxdata() has been called. Furthermore, a call to
+** sqlite3_get_auxdata() that occurs immediately after a corresponding call
+** to sqlite3_set_auxdata() might still return NULL if an out-of-memory
+** condition occurred during the sqlite3_set_auxdata() call or if the
+** function is being evaluated during query planning rather than during
+** query execution.
+**
+** ^(In practice, auxiliary data is preserved between function calls for
** function parameters that are compile-time constants, including literal
** values and [parameters] and expressions composed from the same.)^
**
@@ -5880,10 +5964,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
**
** These routines must be called from the same thread in which
** the SQL function is running.
+**
+** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()].
*/
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+/*
+** CAPI3REF: Database Connection Client Data
+** METHOD: sqlite3
+**
+** These functions are used to associate one or more named pointers
+** with a [database connection].
+** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P
+** to be attached to [database connection] D using name N. Subsequent
+** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P
+** or a NULL pointer if there were no prior calls to
+** sqlite3_set_clientdata() with the same values of D and N.
+** Names are compared using strcmp() and are thus case sensitive.
+**
+** If P and X are both non-NULL, then the destructor X is invoked with
+** argument P on the first of the following occurrences:
+** <ul>
+** <li> An out-of-memory error occurs during the call to
+** sqlite3_set_clientdata() which attempts to register pointer P.
+** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made
+** with the same D and N parameters.
+** <li> The database connection closes. SQLite does not make any guarantees
+** about the order in which destructors are called, only that all
+** destructors will be called exactly once at some point during the
+** database connection closing process.
+** </ul>
+**
+** SQLite does not do anything with client data other than invoke
+** destructors on the client data at the appropriate time. The intended
+** use for client data is to provide a mechanism for wrapper libraries
+** to store additional information about an SQLite database connection.
+**
+** There is no limit (other than available memory) on the number of different
+** client data pointers (with different names) that can be attached to a
+** single database connection. However, the implementation is optimized
+** for the case of having only one or two different client data names.
+** Applications and wrapper libraries are discouraged from using more than
+** one client data name each.
+**
+** There is no way to enumerate the client data pointers
+** associated with a database connection. The N parameter can be thought
+** of as a secret key such that only code that knows the secret key is able
+** to access the associated data.
+**
+** Security Warning: These interfaces should not be exposed in scripting
+** languages or in other circumstances where it might be possible for an
+** an attacker to invoke them. Any agent that can invoke these interfaces
+** can probably also take control of the process.
+**
+** Database connection client data is only available for SQLite
+** version 3.44.0 ([dateof:3.44.0]) and later.
+**
+** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()].
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*);
+SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*));
/*
** CAPI3REF: Constants Defining Special Destructor Behavior
@@ -6085,6 +6226,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
** higher order bits are discarded.
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
+**
+** Every [application-defined SQL function] that invokes this interface
+** should include the [SQLITE_RESULT_SUBTYPE] property in its
+** text encoding argument when the SQL function is
+** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE]
+** property is omitted from the function that invokes sqlite3_result_subtype(),
+** then in some cases the sqlite3_result_subtype() might fail to set
+** the result subtype.
+**
+** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any
+** SQL function that invokes the sqlite3_result_subtype() interface
+** and that does not have the SQLITE_RESULT_SUBTYPE property will raise
+** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1
+** by default.
*/
SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
@@ -6516,7 +6671,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema);
/*
-** CAPI3REF: Allowed return values from [sqlite3_txn_state()]
+** CAPI3REF: Allowed return values from sqlite3_txn_state()
** KEYWORDS: {transaction state}
**
** These constants define the current transaction state of a database file.
@@ -6648,7 +6803,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^Each call to the sqlite3_autovacuum_pages() interface overrides all
** previous invocations for that database connection. ^If the callback
** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer,
-** then the autovacuum steps callback is cancelled. The return value
+** then the autovacuum steps callback is canceled. The return value
** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might
** be some other error code if something goes wrong. The current
** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other
@@ -7167,6 +7322,10 @@ struct sqlite3_module {
/* The methods above are in versions 1 and 2 of the sqlite_module object.
** Those below are for version 3 and greater. */
int (*xShadowName)(const char*);
+ /* The methods above are in versions 1 through 3 of the sqlite_module object.
+ ** Those below are for version 4 and greater. */
+ int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema,
+ const char *zTabName, int mFlags, char **pzErr);
};
/*
@@ -7654,7 +7813,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
** code is returned and the transaction rolled back.
**
** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
+** open blob handle results in undefined behavior. ^Calling this routine
** with a null pointer (such as would be returned by a failed call to
** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
** is passed a valid open blob handle, the values returned by the
@@ -7881,9 +8040,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
+** will always return SQLITE_BUSY. In most cases the SQLite core only uses
+** sqlite3_mutex_try() as an optimization, so this is acceptable
+** behavior. The exceptions are unix builds that set the
+** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working
+** sqlite3_mutex_try() is required.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
** previously entered by the same thread. The behavior
@@ -8134,6 +8295,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_PRNG_SAVE 5
#define SQLITE_TESTCTRL_PRNG_RESTORE 6
#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
+#define SQLITE_TESTCTRL_FK_NO_ACTION 7
#define SQLITE_TESTCTRL_BITVEC_TEST 8
#define SQLITE_TESTCTRL_FAULT_INSTALL 9
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
@@ -8141,6 +8303,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ASSERT 12
#define SQLITE_TESTCTRL_ALWAYS 13
#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
+#define SQLITE_TESTCTRL_JSON_SELFCHECK 14
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
@@ -8162,7 +8325,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_TRACEFLAGS 31
#define SQLITE_TESTCTRL_TUNE 32
#define SQLITE_TESTCTRL_LOGEST 33
-#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */
+#define SQLITE_TESTCTRL_USELONGDOUBLE 34
+#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@@ -9618,7 +9782,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
** <dd>Calls of the form
** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the
-** the [xConnect] or [xCreate] methods of a [virtual table] implmentation
+** the [xConnect] or [xCreate] methods of a [virtual table] implementation
** prohibits that virtual table from being used from within triggers and
** views.
** </dd>
@@ -9808,7 +9972,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*);
** communicated to the xBestIndex method as a
** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use
** this constraint, it must set the corresponding
-** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under
+** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under
** the usual mode of handling IN operators, SQLite generates [bytecode]
** that invokes the [xFilter|xFilter() method] once for each value
** on the right-hand side of the IN operator.)^ Thus the virtual table
@@ -10237,7 +10401,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** When the [sqlite3_blob_write()] API is used to update a blob column,
** the pre-update hook is invoked with SQLITE_DELETE. This is because the
** in this case the new values are not available. In this case, when a
-** callback made with op==SQLITE_DELETE is actuall a write using the
+** callback made with op==SQLITE_DELETE is actually a write using the
** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
** the index of the column being written. In other cases, where the
** pre-update hook is being invoked for some other reason, including a
@@ -10498,6 +10662,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
** of the database exists.
**
+** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set,
+** the returned buffer content will remain accessible and unchanged
+** until either the next write operation on the connection or when
+** the connection is closed, and applications must not modify the
+** buffer. If the bit had been clear, the returned buffer will not
+** be accessed by SQLite after the call.
+**
** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
** allocation error occurs.
@@ -10546,6 +10717,9 @@ SQLITE_API unsigned char *sqlite3_serialize(
** SQLite will try to increase the buffer size using sqlite3_realloc64()
** if writes on the database cause it to grow larger than M bytes.
**
+** Applications must not modify the buffer P or invalidate it before
+** the database connection D is closed.
+**
** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
** database is currently in a read transaction or is involved in a backup
** operation.
@@ -10554,6 +10728,13 @@ SQLITE_API unsigned char *sqlite3_serialize(
** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the
** function returns SQLITE_ERROR.
**
+** The deserialized database should not be in [WAL mode]. If the database
+** is in WAL mode, then any attempt to use the database file will result
+** in an [SQLITE_CANTOPEN] error. The application can set the
+** [file format version numbers] (bytes 18 and 19) of the input database P
+** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the
+** database file into rollback mode and work around this limitation.
+**
** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
** [sqlite3_free()] is invoked on argument P prior to returning.
@@ -11627,6 +11808,18 @@ SQLITE_API int sqlite3changeset_concat(
/*
+** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
+*/
+SQLITE_API int sqlite3changeset_upgrade(
+ sqlite3 *db,
+ const char *zDb,
+ int nIn, const void *pIn, /* Input changeset */
+ int *pnOut, void **ppOut /* OUT: Inverse of input */
+);
+
+
+
+/*
** CAPI3REF: Changegroup Handle
**
** A changegroup is an object used to combine two or more
@@ -11673,6 +11866,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add a Schema to a Changegroup
+** METHOD: sqlite3_changegroup_schema
+**
+** This method may be used to optionally enforce the rule that the changesets
+** added to the changegroup handle must match the schema of database zDb
+** ("main", "temp", or the name of an attached database). If
+** sqlite3changegroup_add() is called to add a changeset that is not compatible
+** with the configured schema, SQLITE_SCHEMA is returned and the changegroup
+** object is left in an undefined state.
+**
+** A changeset schema is considered compatible with the database schema in
+** the same way as for sqlite3changeset_apply(). Specifically, for each
+** table in the changeset, there exists a database table with:
+**
+** <ul>
+** <li> The name identified by the changeset, and
+** <li> at least as many columns as recorded in the changeset, and
+** <li> the primary key columns in the same position as recorded in
+** the changeset.
+** </ul>
+**
+** The output of the changegroup object always has the same schema as the
+** database nominated using this function. In cases where changesets passed
+** to sqlite3changegroup_add() have fewer columns than the corresponding table
+** in the database schema, these are filled in using the default column
+** values from the database schema. This makes it possible to combined
+** changesets that have different numbers of columns for a single table
+** within a changegroup, provided that they are otherwise compatible.
+*/
+SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb);
+
+/*
** CAPI3REF: Add A Changeset To A Changegroup
** METHOD: sqlite3_changegroup
**
@@ -11740,13 +11965,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
** If the new changeset contains changes to a table that is already present
** in the changegroup, then the number of columns and the position of the
** primary key columns for the table must be consistent. If this is not the
-** case, this function fails with SQLITE_SCHEMA. If the input changeset
-** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
-** returned. Or, if an out-of-memory condition occurs during processing, this
-** function returns SQLITE_NOMEM. In all cases, if an error occurs the state
-** of the final contents of the changegroup is undefined.
+** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup
+** object has been configured with a database schema using the
+** sqlite3changegroup_schema() API, then it is possible to combine changesets
+** with different numbers of columns for a single table, provided that
+** they are otherwise compatible.
+**
+** If the input changeset appears to be corrupt and the corruption is
+** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition
+** occurs during processing, this function returns SQLITE_NOMEM.
**
-** If no error occurs, SQLITE_OK is returned.
+** In all cases, if an error occurs the state of the final contents of the
+** changegroup is undefined. If no error occurs, SQLITE_OK is returned.
*/
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
@@ -12011,10 +12241,17 @@ SQLITE_API int sqlite3changeset_apply_v2(
** <li>an insert change if all fields of the conflicting row match
** the row being inserted.
** </ul>
+**
+** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd>
+** If this flag it set, then all foreign key constraints in the target
+** database behave as if they were declared with "ON UPDATE NO ACTION ON
+** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
+** or SET DEFAULT.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004
+#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@@ -12580,8 +12817,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of columns in the table, SQLITE_RANGE is returned.
+**
+** Otherwise, this function attempts to retrieve the text of column iCol of
+** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@@ -12591,8 +12831,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
+** If parameter iCol is less than zero, or greater than or equal to the
+** number of phrases in the current query, as returned by xPhraseCount,
+** 0 is returned. Otherwise, this function returns the number of tokens in
+** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@@ -12608,12 +12850,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
+** output by xInstCount(). If iIdx is less than zero or greater than
+** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
+** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. Returns SQLITE_OK if successful, or an error
-** code (i.e. SQLITE_NOMEM) if an error occurs.
+** first token of the phrase. SQLITE_OK is returned if successful, or an
+** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@@ -12639,6 +12882,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
+** If parameter iPhrase is less than zero, or greater than or equal to
+** the number of phrases in the query, as returned by xPhraseCount(),
+** this function returns SQLITE_RANGE.
+**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@@ -12753,6 +13000,39 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
+**
+** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase iPhrase of the current
+** query. Before returning, output parameter *ppToken is set to point
+** to a buffer containing the requested token, and *pnToken to the
+** size of this buffer in bytes.
+**
+** If iPhrase or iToken are less than zero, or if iPhrase is greater than
+** or equal to the number of phrases in the query as reported by
+** xPhraseCount(), or if iToken is equal to or greater than the number of
+** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+ are both zeroed.
+**
+** The output text is not a copy of the query text that specified the
+** token. It is the output of the tokenizer module. For tokendata=1
+** tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+** This is used to access token iToken of phrase hit iIdx within the
+** current row. If iIdx is less than zero or greater than or equal to the
+** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
+** output variable (*ppToken) is set to point to a buffer containing the
+** matching document token, and (*pnToken) to the size of that buffer in
+** bytes. This API is not available if the specified token matches a
+** prefix query term. In that case both output variables are always set
+** to 0.
+**
+** The output text is not a copy of the document text that was tokenized.
+** It is the output of the tokenizer module. For tokendata=1 tables, this
+** includes any embedded 0x00 and trailing data.
+**
+** This API can be quite slow if used with an FTS5 table created with the
+** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
@@ -12790,6 +13070,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+ /* Below this point are iVersion>=3 only */
+ int (*xQueryToken)(Fts5Context*,
+ int iPhrase, int iToken,
+ const char **ppToken, int *pnToken
+ );
+ int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*
@@ -12984,8 +13271,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
+** provide synonyms when tokenizing document text (method (3)) or query
+** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@@ -13033,7 +13320,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@@ -13042,7 +13329,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
- void **ppContext,
+ void **ppUserData,
fts5_tokenizer *pTokenizer
);
@@ -13050,7 +13337,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
- void *pContext,
+ void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3.go
index 5e4e2ff..4b3b6ca 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3.go
@@ -21,7 +21,6 @@ package sqlite3
#cgo CFLAGS: -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1
#cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT
#cgo CFLAGS: -Wno-deprecated-declarations
-#cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1
#cgo openbsd CFLAGS: -I/usr/local/include
#cgo openbsd LDFLAGS: -L/usr/local/lib
#ifndef USE_LIBSQLITE3
@@ -48,6 +47,18 @@ package sqlite3
# define SQLITE_DETERMINISTIC 0
#endif
+#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64)
+# undef USE_PREAD
+# undef USE_PWRITE
+# define USE_PREAD64 1
+# define USE_PWRITE64 1
+#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+# undef USE_PREAD
+# undef USE_PWRITE
+# define USE_PREAD64 1
+# define USE_PWRITE64 1
+#endif
+
static int
_sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) {
#ifdef SQLITE_OPEN_URI
@@ -596,10 +607,9 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin
// RegisterFunc makes a Go function available as a SQLite function.
//
// The Go function can have arguments of the following types: any
-// numeric type except complex, bool, []byte, string and
-// interface{}. interface{} arguments are given the direct translation
-// of the SQLite data type: int64 for INTEGER, float64 for FLOAT,
-// []byte for BLOB, string for TEXT.
+// numeric type except complex, bool, []byte, string and any.
+// any arguments are given the direct translation of the SQLite data type:
+// int64 for INTEGER, float64 for FLOAT, []byte for BLOB, string for TEXT.
//
// The function can additionally be variadic, as long as the type of
// the variadic argument is one of the above.
@@ -609,7 +619,7 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin
// optimizations in its queries.
//
// See _example/go_custom_funcs for a detailed example.
-func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) error {
+func (c *SQLiteConn) RegisterFunc(name string, impl any, pure bool) error {
var fi functionInfo
fi.f = reflect.ValueOf(impl)
t := fi.f.Type()
@@ -691,7 +701,7 @@ func sqlite3CreateFunction(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTe
// return an error in addition to their other return values.
//
// See _example/go_custom_funcs for a detailed example.
-func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error {
+func (c *SQLiteConn) RegisterAggregator(name string, impl any, pure bool) error {
var ai aggInfo
ai.constructor = reflect.ValueOf(impl)
t := ai.constructor.Type()
@@ -865,14 +875,16 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []driver.Named
// consume the number of arguments used in the current
// statement and append all named arguments not
// contained therein
- stmtArgs = append(stmtArgs, args[start:start+na]...)
- for i := range args {
- if (i < start || i >= na) && args[i].Name != "" {
- stmtArgs = append(stmtArgs, args[i])
+ if len(args[start:start+na]) > 0 {
+ stmtArgs = append(stmtArgs, args[start:start+na]...)
+ for i := range args {
+ if (i < start || i >= na) && args[i].Name != "" {
+ stmtArgs = append(stmtArgs, args[i])
+ }
+ }
+ for i := range stmtArgs {
+ stmtArgs[i].Ordinal = i + 1
}
- }
- for i := range stmtArgs {
- stmtArgs[i].Ordinal = i + 1
}
res, err = s.(*SQLiteStmt).exec(ctx, stmtArgs)
if err != nil && err != driver.ErrSkip {
@@ -965,103 +977,104 @@ func (c *SQLiteConn) begin(ctx context.Context) (driver.Tx, error) {
// The argument is may be either in parentheses or it may be separated from
// the pragma name by an equal sign. The two syntaxes yield identical results.
// In many pragmas, the argument is a boolean. The boolean can be one of:
-// 1 yes true on
-// 0 no false off
+//
+// 1 yes true on
+// 0 no false off
//
// You can specify a DSN string using a URI as the filename.
-// test.db
-// file:test.db?cache=shared&mode=memory
-// :memory:
-// file::memory:
//
-// mode
-// Access mode of the database.
-// https://www.sqlite.org/c3ref/open.html
-// Values:
-// - ro
-// - rw
-// - rwc
-// - memory
+// test.db
+// file:test.db?cache=shared&mode=memory
+// :memory:
+// file::memory:
//
-// cache
-// SQLite Shared-Cache Mode
-// https://www.sqlite.org/sharedcache.html
-// Values:
-// - shared
-// - private
+// mode
+// Access mode of the database.
+// https://www.sqlite.org/c3ref/open.html
+// Values:
+// - ro
+// - rw
+// - rwc
+// - memory
//
-// immutable=Boolean
-// The immutable parameter is a boolean query parameter that indicates
-// that the database file is stored on read-only media. When immutable is set,
-// SQLite assumes that the database file cannot be changed,
-// even by a process with higher privilege,
-// and so the database is opened read-only and all locking and change detection is disabled.
-// Caution: Setting the immutable property on a database file that
-// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors.
+// cache
+// SQLite Shared-Cache Mode
+// https://www.sqlite.org/sharedcache.html
+// Values:
+// - shared
+// - private
//
-// go-sqlite3 adds the following query parameters to those used by SQLite:
-// _loc=XXX
-// Specify location of time format. It's possible to specify "auto".
+// immutable=Boolean
+// The immutable parameter is a boolean query parameter that indicates
+// that the database file is stored on read-only media. When immutable is set,
+// SQLite assumes that the database file cannot be changed,
+// even by a process with higher privilege,
+// and so the database is opened read-only and all locking and change detection is disabled.
+// Caution: Setting the immutable property on a database file that
+// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors.
//
-// _mutex=XXX
-// Specify mutex mode. XXX can be "no", "full".
+// go-sqlite3 adds the following query parameters to those used by SQLite:
//
-// _txlock=XXX
-// Specify locking behavior for transactions. XXX can be "immediate",
-// "deferred", "exclusive".
+// _loc=XXX
+// Specify location of time format. It's possible to specify "auto".
//
-// _auto_vacuum=X | _vacuum=X
-// 0 | none - Auto Vacuum disabled
-// 1 | full - Auto Vacuum FULL
-// 2 | incremental - Auto Vacuum Incremental
+// _mutex=XXX
+// Specify mutex mode. XXX can be "no", "full".
//
-// _busy_timeout=XXX"| _timeout=XXX
-// Specify value for sqlite3_busy_timeout.
+// _txlock=XXX
+// Specify locking behavior for transactions. XXX can be "immediate",
+// "deferred", "exclusive".
//
-// _case_sensitive_like=Boolean | _cslike=Boolean
-// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like
-// Default or disabled the LIKE operation is case-insensitive.
-// When enabling this options behaviour of LIKE will become case-sensitive.
+// _auto_vacuum=X | _vacuum=X
+// 0 | none - Auto Vacuum disabled
+// 1 | full - Auto Vacuum FULL
+// 2 | incremental - Auto Vacuum Incremental
//
-// _defer_foreign_keys=Boolean | _defer_fk=Boolean
-// Defer Foreign Keys until outermost transaction is committed.
+// _busy_timeout=XXX"| _timeout=XXX
+// Specify value for sqlite3_busy_timeout.
//
-// _foreign_keys=Boolean | _fk=Boolean
-// Enable or disable enforcement of foreign keys.
+// _case_sensitive_like=Boolean | _cslike=Boolean
+// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like
+// Default or disabled the LIKE operation is case-insensitive.
+// When enabling this options behaviour of LIKE will become case-sensitive.
//
-// _ignore_check_constraints=Boolean
-// This pragma enables or disables the enforcement of CHECK constraints.
-// The default setting is off, meaning that CHECK constraints are enforced by default.
+// _defer_foreign_keys=Boolean | _defer_fk=Boolean
+// Defer Foreign Keys until outermost transaction is committed.
//
-// _journal_mode=MODE | _journal=MODE
-// Set journal mode for the databases associated with the current connection.
-// https://www.sqlite.org/pragma.html#pragma_journal_mode
+// _foreign_keys=Boolean | _fk=Boolean
+// Enable or disable enforcement of foreign keys.
//
-// _locking_mode=X | _locking=X
-// Sets the database connection locking-mode.
-// The locking-mode is either NORMAL or EXCLUSIVE.
-// https://www.sqlite.org/pragma.html#pragma_locking_mode
+// _ignore_check_constraints=Boolean
+// This pragma enables or disables the enforcement of CHECK constraints.
+// The default setting is off, meaning that CHECK constraints are enforced by default.
//
-// _query_only=Boolean
-// The query_only pragma prevents all changes to database files when enabled.
+// _journal_mode=MODE | _journal=MODE
+// Set journal mode for the databases associated with the current connection.
+// https://www.sqlite.org/pragma.html#pragma_journal_mode
//
-// _recursive_triggers=Boolean | _rt=Boolean
-// Enable or disable recursive triggers.
+// _locking_mode=X | _locking=X
+// Sets the database connection locking-mode.
+// The locking-mode is either NORMAL or EXCLUSIVE.
+// https://www.sqlite.org/pragma.html#pragma_locking_mode
//
-// _secure_delete=Boolean|FAST
-// When secure_delete is on, SQLite overwrites deleted content with zeros.
-// https://www.sqlite.org/pragma.html#pragma_secure_delete
+// _query_only=Boolean
+// The query_only pragma prevents all changes to database files when enabled.
//
-// _synchronous=X | _sync=X
-// Change the setting of the "synchronous" flag.
-// https://www.sqlite.org/pragma.html#pragma_synchronous
+// _recursive_triggers=Boolean | _rt=Boolean
+// Enable or disable recursive triggers.
//
-// _writable_schema=Boolean
-// When this pragma is on, the SQLITE_MASTER tables in which database
-// can be changed using ordinary UPDATE, INSERT, and DELETE statements.
-// Warning: misuse of this pragma can easily result in a corrupt database file.
+// _secure_delete=Boolean|FAST
+// When secure_delete is on, SQLite overwrites deleted content with zeros.
+// https://www.sqlite.org/pragma.html#pragma_secure_delete
//
+// _synchronous=X | _sync=X
+// Change the setting of the "synchronous" flag.
+// https://www.sqlite.org/pragma.html#pragma_synchronous
//
+// _writable_schema=Boolean
+// When this pragma is on, the SQLITE_MASTER tables in which database
+// can be changed using ordinary UPDATE, INSERT, and DELETE statements.
+// Warning: misuse of this pragma can easily result in a corrupt database file.
func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
if C.sqlite3_threadsafe() == 0 {
return nil, errors.New("sqlite library was not compiled for thread-safe operation")
@@ -1895,6 +1908,7 @@ func (s *SQLiteStmt) Close() error {
if rv != C.SQLITE_OK {
return s.c.lastError()
}
+ s.c = nil
runtime.SetFinalizer(s, nil)
return nil
}
@@ -2000,6 +2014,7 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive
closed: false,
ctx: ctx,
}
+ runtime.SetFinalizer(rows, (*SQLiteRows).Close)
return rows, nil
}
@@ -2045,6 +2060,7 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []driver.NamedValue) (driver
err error
}
resultCh := make(chan result)
+ defer close(resultCh)
go func() {
r, err := s.execSync(args)
resultCh <- result{r, err}
@@ -2111,6 +2127,8 @@ func (rc *SQLiteRows) Close() error {
return rc.s.c.lastError()
}
rc.s.mu.Unlock()
+ rc.s = nil
+ runtime.SetFinalizer(rc, nil)
return nil
}
@@ -2157,6 +2175,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
return rc.nextSyncLocked(dest)
}
resultCh := make(chan error)
+ defer close(resultCh)
go func() {
resultCh <- rc.nextSyncLocked(dest)
}()
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_context.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_context.go
index 7c7431d..7c7431d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_context.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_context.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt.go
index afd9333..bd9a3bc 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt.go
@@ -50,15 +50,15 @@ import (
// perhaps using a cryptographic hash function like SHA1.
// CryptEncoderSHA1 encodes a password with SHA1
-func CryptEncoderSHA1(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA1(pass []byte, hash any) []byte {
h := sha1.Sum(pass)
return h[:]
}
// CryptEncoderSSHA1 encodes a password with SHA1 with the
// configured salt.
-func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA1(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha1.Sum(p)
@@ -67,15 +67,15 @@ func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte {
}
// CryptEncoderSHA256 encodes a password with SHA256
-func CryptEncoderSHA256(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA256(pass []byte, hash any) []byte {
h := sha256.Sum256(pass)
return h[:]
}
// CryptEncoderSSHA256 encodes a password with SHA256
// with the configured salt
-func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA256(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha256.Sum256(p)
@@ -84,15 +84,15 @@ func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte
}
// CryptEncoderSHA384 encodes a password with SHA384
-func CryptEncoderSHA384(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA384(pass []byte, hash any) []byte {
h := sha512.Sum384(pass)
return h[:]
}
// CryptEncoderSSHA384 encodes a password with SHA384
// with the configured salt
-func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA384(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha512.Sum384(p)
@@ -101,15 +101,15 @@ func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte
}
// CryptEncoderSHA512 encodes a password with SHA512
-func CryptEncoderSHA512(pass []byte, hash interface{}) []byte {
+func CryptEncoderSHA512(pass []byte, hash any) []byte {
h := sha512.Sum512(pass)
return h[:]
}
// CryptEncoderSSHA512 encodes a password with SHA512
// with the configured salt
-func CryptEncoderSSHA512(salt string) func(pass []byte, hash interface{}) []byte {
- return func(pass []byte, hash interface{}) []byte {
+func CryptEncoderSSHA512(salt string) func(pass []byte, hash any) []byte {
+ return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha512.Sum512(p)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt_test.go
index 0329ca8..e37b467 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_func_crypt_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_func_crypt_test.go
@@ -29,7 +29,7 @@ func TestCryptEncoders(t *testing.T) {
}
for _, e := range tests {
- var fn func(pass []byte, hash interface{}) []byte
+ var fn func(pass []byte, hash any) []byte
switch e.enc {
case "sha1":
fn = CryptEncoderSHA1
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go113_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go113_test.go
index a010cb7..f38d6d1 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go113_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go113_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build go1.13 && cgo
// +build go1.13,cgo
package sqlite3
@@ -45,7 +46,7 @@ func TestBeginTxCancel(t *testing.T) {
}
}()
- err = conn.Raw(func(driverConn interface{}) error {
+ err = conn.Raw(func(driverConn any) error {
d, ok := driverConn.(driver.ConnBeginTx)
if !ok {
t.Fatal("unexpected: wrong type")
@@ -96,7 +97,7 @@ func TestStmtReadonly(t *testing.T) {
}
var ro bool
- c.Raw(func(dc interface{}) error {
+ c.Raw(func(dc any) error {
stmt, err := dc.(*SQLiteConn).Prepare(query)
if err != nil {
return err
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18.go
index 514fd7e..34cad08 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18.go
@@ -3,8 +3,8 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build cgo
-// +build go1.8
+//go:build cgo && go1.8
+// +build cgo,go1.8
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18_test.go
index 8c8c451..eec7479 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_go18_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_go18_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build go1.8 && cgo
// +build go1.8,cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_libsqlite3.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_libsqlite3.go
index ac609c9..95cc7c0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_libsqlite3.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_libsqlite3.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build libsqlite3
// +build libsqlite3
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension.go
index 9433fea..03cbc8b 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_omit_load_extension
// +build !sqlite_omit_load_extension
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_omit.go
index 8c75f9b..d4f8ce6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_omit.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_omit_load_extension
// +build sqlite_omit_load_extension
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_test.go
index 97b1123..c6c03bb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_load_extension_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_load_extension_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_omit_load_extension
// +build !sqlite_omit_load_extension
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_allow_uri_authority.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_allow_uri_authority.go
index 8c4d4d2..51240cb 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_allow_uri_authority.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_allow_uri_authority.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_allow_uri_authority
// +build sqlite_allow_uri_authority
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_app_armor.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_app_armor.go
index 63c80cf..565dbc2 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_app_armor.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_app_armor.go
@@ -4,8 +4,8 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build !windows
-// +build sqlite_app_armor
+//go:build !windows && sqlite_app_armor
+// +build !windows,sqlite_app_armor
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata.go
index c67fa82..63659b4 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata.go
@@ -1,3 +1,4 @@
+//go:build sqlite_column_metadata
// +build sqlite_column_metadata
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata_test.go
index 28767f1..0a9eec6 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_column_metadata_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_column_metadata_test.go
@@ -1,3 +1,4 @@
+//go:build sqlite_column_metadata
// +build sqlite_column_metadata
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_foreign_keys.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_foreign_keys.go
index a676e09..82c944e 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_foreign_keys.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_foreign_keys.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_foreign_keys
// +build sqlite_foreign_keys
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts3_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts3_test.go
index ce44474..a7b31a7 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts3_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts3_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts5.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts5.go
index 0f38df7..2645f28 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_fts5.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_fts5.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_fts5 || fts5
// +build sqlite_fts5 fts5
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_icu.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_icu.go
index f82bdd0..2d47827 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_icu.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_icu.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_icu || icu
// +build sqlite_icu icu
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_introspect.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_introspect.go
index 6512b2b..cd2e540 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_introspect.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_introspect.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_introspect
// +build sqlite_introspect
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions.go
index 7cd68d3..bd62d9a 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_math_functions
// +build sqlite_math_functions
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions_test.go
index 6ff076b..09dbd8d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_math_functions_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_math_functions_test.go
@@ -1,3 +1,4 @@
+//go:build sqlite_math_functions
// +build sqlite_math_functions
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_os_trace.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_os_trace.go
index 9a30566..9a30566 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_os_trace.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_os_trace.go
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate.go
index cea032e..ed725ee 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook.go
index b43e482..8cce278 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_preupdate_hook
// +build sqlite_preupdate_hook
package sqlite3
@@ -54,10 +55,10 @@ func (d *SQLitePreUpdateData) Count() int {
return int(C.sqlite3_preupdate_count(d.Conn.db))
}
-func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error {
+func (d *SQLitePreUpdateData) row(dest []any, new bool) error {
for i := 0; i < d.Count() && i < len(dest); i++ {
var val *C.sqlite3_value
- var src interface{}
+ var src any
// Initially I tried making this just a function pointer argument, but
// it's absurdly complicated to pass C function pointers.
@@ -95,7 +96,7 @@ func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error {
// Old populates dest with the row data to be replaced. This works similar to
// database/sql's Rows.Scan()
-func (d *SQLitePreUpdateData) Old(dest ...interface{}) error {
+func (d *SQLitePreUpdateData) Old(dest ...any) error {
if d.Op == SQLITE_INSERT {
return errors.New("There is no old row for INSERT operations")
}
@@ -104,7 +105,7 @@ func (d *SQLitePreUpdateData) Old(dest ...interface{}) error {
// New populates dest with the replacement row data. This works similar to
// database/sql's Rows.Scan()
-func (d *SQLitePreUpdateData) New(dest ...interface{}) error {
+func (d *SQLitePreUpdateData) New(dest ...any) error {
if d.Op == SQLITE_DELETE {
return errors.New("There is no new row for DELETE operations")
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook_test.go
index 20c8766..4892602 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_hook_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_hook_test.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_preupdate_hook
// +build sqlite_preupdate_hook
package sqlite3
@@ -18,8 +19,8 @@ type preUpdateHookDataForTest struct {
tableName string
count int
op int
- oldRow []interface{}
- newRow []interface{}
+ oldRow []any
+ newRow []any
}
func TestPreUpdateHook(t *testing.T) {
@@ -29,7 +30,7 @@ func TestPreUpdateHook(t *testing.T) {
ConnectHook: func(conn *SQLiteConn) error {
conn.RegisterPreUpdateHook(func(data SQLitePreUpdateData) {
eval := -1
- oldRow := []interface{}{eval}
+ oldRow := []any{eval}
if data.Op != SQLITE_INSERT {
err := data.Old(oldRow...)
if err != nil {
@@ -38,7 +39,7 @@ func TestPreUpdateHook(t *testing.T) {
}
eval2 := -1
- newRow := []interface{}{eval2}
+ newRow := []any{eval2}
if data.Op != SQLITE_DELETE {
err := data.New(newRow...)
if err != nil {
@@ -47,7 +48,7 @@ func TestPreUpdateHook(t *testing.T) {
}
// tests dest bound checks in loop
- var tooSmallRow []interface{}
+ var tooSmallRow []any
if data.Op != SQLITE_INSERT {
err := data.Old(tooSmallRow...)
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_omit.go
index c510a15..f60da6c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_preupdate_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_preupdate_omit.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_preupdate_hook && cgo
// +build !sqlite_preupdate_hook,cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete.go
index 934fa6b..6bb05b8 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_secure_delete
// +build sqlite_secure_delete
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete_fast.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete_fast.go
index b0de130..982020a 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_secure_delete_fast.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_secure_delete_fast.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_secure_delete_fast
// +build sqlite_secure_delete_fast
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize.go
index 2560c43..f1710c1 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize.go
@@ -1,3 +1,4 @@
+//go:build !libsqlite3 || sqlite_serialize
// +build !libsqlite3 sqlite_serialize
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_omit.go
index b154dd3..d00ead0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_omit.go
@@ -1,3 +1,4 @@
+//go:build libsqlite3 && !sqlite_serialize
// +build libsqlite3,!sqlite_serialize
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_test.go
index 624c5a9..5c7efec 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_serialize_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_serialize_test.go
@@ -1,3 +1,4 @@
+//go:build !libsqlite3 || sqlite_serialize
// +build !libsqlite3 sqlite_serialize
package sqlite3
@@ -54,7 +55,7 @@ func TestSerializeDeserialize(t *testing.T) {
defer srcConn.Close()
var serialized []byte
- if err := srcConn.Raw(func(raw interface{}) error {
+ if err := srcConn.Raw(func(raw any) error {
var err error
serialized, err = raw.(*SQLiteConn).Serialize("")
return err
@@ -80,7 +81,7 @@ func TestSerializeDeserialize(t *testing.T) {
}
defer destConn.Close()
- if err := destConn.Raw(func(raw interface{}) error {
+ if err := destConn.Raw(func(raw any) error {
return raw.(*SQLiteConn).Deserialize(serialized, "")
}); err != nil {
t.Fatal("Failed to deserialize source database:", err)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_stat4.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_stat4.go
index d4d30f0..799fbb0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_stat4.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_stat4.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_stat4
// +build sqlite_stat4
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.c b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.c
index fc37b33..fc37b33 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.c
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.c
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.go
index adfa26c..76f7bbf 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify.go
@@ -3,8 +3,8 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build cgo
-// +build sqlite_unlock_notify
+//go:build cgo && sqlite_unlock_notify
+// +build cgo,sqlite_unlock_notify
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify_test.go
index 95db938..3a9168c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_unlock_notify_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_unlock_notify_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_unlock_notify
// +build sqlite_unlock_notify
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth.go
index b62b608..de9630c 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_userauth
// +build sqlite_userauth
package sqlite3
@@ -79,7 +80,7 @@ var (
// If a database contains the SQLITE_USER table, then the
// call to Authenticate must be invoked with an
// appropriate username and password prior to enable read and write
-//access to the database.
+// access to the database.
//
// Return SQLITE_OK on success or SQLITE_ERROR if the username/password
// combination is incorrect or unknown.
@@ -103,9 +104,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authenticate(username, password string) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -155,9 +157,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserAdd(username, password string, admin int) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -207,9 +210,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserChange(username, password string, admin int) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -249,9 +253,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserDelete(username string) int {
// Allocate C Variables
cuser := C.CString(username)
@@ -280,8 +285,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// 0 - Disabled
-// 1 - Enabled
+//
+// 0 - Disabled
+// 1 - Enabled
func (c *SQLiteConn) authEnabled() int {
return int(C._sqlite3_auth_enabled(c.db))
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_omit.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_omit.go
index 302cd57..15370df 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_omit.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_omit.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !sqlite_userauth
// +build !sqlite_userauth
package sqlite3
@@ -17,7 +18,7 @@ import (
// If a database contains the SQLITE_USER table, then the
// call to Authenticate must be invoked with an
// appropriate username and password prior to enable read and write
-//access to the database.
+// access to the database.
//
// Return SQLITE_OK on success or SQLITE_ERROR if the username/password
// combination is incorrect or unknown.
@@ -34,9 +35,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authenticate(username, password string) int {
// NOOP
return 0
@@ -65,9 +67,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserAdd(username, password string, admin int) int {
// NOOP
return 0
@@ -96,9 +99,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserChange(username, password string, admin int) int {
// NOOP
return 0
@@ -122,9 +126,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// C.SQLITE_OK (0)
-// C.SQLITE_ERROR (1)
-// C.SQLITE_AUTH (23)
+//
+// C.SQLITE_OK (0)
+// C.SQLITE_ERROR (1)
+// C.SQLITE_AUTH (23)
func (c *SQLiteConn) authUserDelete(username string) int {
// NOOP
return 0
@@ -142,8 +147,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) {
// It is however exported for usage within SQL by the user.
//
// Returns:
-// 0 - Disabled
-// 1 - Enabled
+//
+// 0 - Disabled
+// 1 - Enabled
func (c *SQLiteConn) authEnabled() int {
// NOOP
return 0
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_test.go
index 543f48e..12e1151 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_userauth_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_userauth_test.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_userauth
// +build sqlite_userauth
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_full.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_full.go
index 5185a96..df13c9d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_full.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_full.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_vacuum_full
// +build sqlite_vacuum_full
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_incr.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_incr.go
index a9d8a18..a2e4881 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vacuum_incr.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vacuum_incr.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_vacuum_incr
// +build sqlite_vacuum_incr
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable.go
index 4a93c46..9b164b3 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_vtable || vtable
// +build sqlite_vtable vtable
package sqlite3
@@ -516,7 +517,7 @@ func goMDestroy(pClientData unsafe.Pointer) {
func goVFilter(pCursor unsafe.Pointer, idxNum C.int, idxName *C.char, argc C.int, argv **C.sqlite3_value) *C.char {
vtc := lookupHandle(pCursor).(*sqliteVTabCursor)
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
- vals := make([]interface{}, 0, argc)
+ vals := make([]any, 0, argc)
for _, v := range args {
conv, err := callbackArgGeneric(v)
if err != nil {
@@ -588,7 +589,7 @@ func goVUpdate(pVTab unsafe.Pointer, argc C.int, argv **C.sqlite3_value, pRowid
if v, ok := vt.vTab.(VTabUpdater); ok {
// convert argv
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
- vals := make([]interface{}, 0, argc)
+ vals := make([]any, 0, argc)
for _, v := range args {
conv, err := callbackArgGeneric(v)
if err != nil {
@@ -662,9 +663,9 @@ type VTab interface {
// deleted.
// See: https://sqlite.org/vtab.html#xupdate
type VTabUpdater interface {
- Delete(interface{}) error
- Insert(interface{}, []interface{}) (int64, error)
- Update(interface{}, []interface{}) error
+ Delete(any) error
+ Insert(any, []any) (int64, error)
+ Update(any, []any) error
}
// VTabCursor describes cursors that point into the virtual table and are used
@@ -673,7 +674,7 @@ type VTabCursor interface {
// http://sqlite.org/vtab.html#xclose
Close() error
// http://sqlite.org/vtab.html#xfilter
- Filter(idxNum int, idxStr string, vals []interface{}) error
+ Filter(idxNum int, idxStr string, vals []any) error
// http://sqlite.org/vtab.html#xnext
Next() error
// http://sqlite.org/vtab.html#xeof
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable_test.go
index aae646d..64511e2 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_opt_vtable_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_opt_vtable_test.go
@@ -98,7 +98,7 @@ func (vc *testVTabCursor) Close() error {
return nil
}
-func (vc *testVTabCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *testVTabCursor) Filter(idxNum int, idxStr string, vals []any) error {
vc.index = 0
return nil
}
@@ -236,10 +236,10 @@ func TestVUpdate(t *testing.T) {
if len(vt.data) != 2 {
t.Fatalf("expected table vt to have exactly 2 rows, got: %d", len(vt.data))
}
- if !reflect.DeepEqual(vt.data[0], []interface{}{int64(115), "b", "c"}) {
+ if !reflect.DeepEqual(vt.data[0], []any{int64(115), "b", "c"}) {
t.Fatalf("expected table vt entry 0 to be [115 b c], instead: %v", vt.data[0])
}
- if !reflect.DeepEqual(vt.data[1], []interface{}{int64(116), "d", "e"}) {
+ if !reflect.DeepEqual(vt.data[1], []any{int64(116), "d", "e"}) {
t.Fatalf("expected table vt entry 1 to be [116 d e], instead: %v", vt.data[1])
}
@@ -273,10 +273,10 @@ func TestVUpdate(t *testing.T) {
if len(vt.data) != 2 {
t.Fatalf("expected table vt to have exactly 2 rows, got: %d", len(vt.data))
}
- if !reflect.DeepEqual(vt.data[0], []interface{}{int64(115), "b", "c"}) {
+ if !reflect.DeepEqual(vt.data[0], []any{int64(115), "b", "c"}) {
t.Fatalf("expected table vt entry 0 to be [115 b c], instead: %v", vt.data[0])
}
- if !reflect.DeepEqual(vt.data[1], []interface{}{int64(117), "f", "e"}) {
+ if !reflect.DeepEqual(vt.data[1], []any{int64(117), "f", "e"}) {
t.Fatalf("expected table vt entry 1 to be [117 f e], instead: %v", vt.data[1])
}
@@ -297,7 +297,7 @@ func TestVUpdate(t *testing.T) {
if len(vt.data) != 1 {
t.Fatalf("expected table vt to have exactly 1 row, got: %d", len(vt.data))
}
- if !reflect.DeepEqual(vt.data[0], []interface{}{int64(115), "b", "c"}) {
+ if !reflect.DeepEqual(vt.data[0], []any{int64(115), "b", "c"}) {
t.Fatalf("expected table vt entry 0 to be [115 b c], instead: %v", vt.data[0])
}
@@ -353,7 +353,7 @@ func (m *vtabUpdateModule) Create(c *SQLiteConn, args []string) (VTab, error) {
}
// create table
- vtab := &vtabUpdateTable{m.t, dbname, tname, cols, typs, make([][]interface{}, 0)}
+ vtab := &vtabUpdateTable{m.t, dbname, tname, cols, typs, make([][]any, 0)}
m.tables[tname] = vtab
return vtab, nil
}
@@ -370,7 +370,7 @@ type vtabUpdateTable struct {
name string
cols []string
typs []string
- data [][]interface{}
+ data [][]any
}
func (t *vtabUpdateTable) Open() (VTabCursor, error) {
@@ -389,7 +389,7 @@ func (t *vtabUpdateTable) Destroy() error {
return nil
}
-func (t *vtabUpdateTable) Insert(id interface{}, vals []interface{}) (int64, error) {
+func (t *vtabUpdateTable) Insert(id any, vals []any) (int64, error) {
var i int64
if id == nil {
i, t.data = int64(len(t.data)), append(t.data, vals)
@@ -407,7 +407,7 @@ func (t *vtabUpdateTable) Insert(id interface{}, vals []interface{}) (int64, err
return i, nil
}
-func (t *vtabUpdateTable) Update(id interface{}, vals []interface{}) error {
+func (t *vtabUpdateTable) Update(id any, vals []any) error {
i, ok := id.(int64)
if !ok {
return fmt.Errorf("id is invalid type: %T", id)
@@ -422,7 +422,7 @@ func (t *vtabUpdateTable) Update(id interface{}, vals []interface{}) error {
return nil
}
-func (t *vtabUpdateTable) Delete(id interface{}) error {
+func (t *vtabUpdateTable) Delete(id any) error {
i, ok := id.(int64)
if !ok {
return fmt.Errorf("id is invalid type: %T", id)
@@ -465,7 +465,7 @@ func (c *vtabUpdateCursor) Column(ctxt *SQLiteContext, col int) error {
return nil
}
-func (c *vtabUpdateCursor) Filter(ixNum int, ixName string, vals []interface{}) error {
+func (c *vtabUpdateCursor) Filter(ixNum int, ixName string, vals []any) error {
return nil
}
@@ -547,7 +547,7 @@ func (vc *testVTabCursorEponymousOnly) Close() error {
return nil
}
-func (vc *testVTabCursorEponymousOnly) Filter(idxNum int, idxStr string, vals []interface{}) error {
+func (vc *testVTabCursorEponymousOnly) Filter(idxNum int, idxStr string, vals []any) error {
vc.index = 0
return nil
}
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_other.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_other.go
index 077d3c6..1f9a755 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_other.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_other.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !windows
// +build !windows
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_solaris.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_solaris.go
index 102f90c..fb4d325 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_solaris.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_solaris.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build solaris
// +build solaris
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_test.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_test.go
index 326361e..63c939d 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_test.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_test.go
@@ -625,7 +625,7 @@ func TestTimestamp(t *testing.T) {
timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC)
tzTest := time.FixedZone("TEST", -9*3600-13*60)
tests := []struct {
- value interface{}
+ value any
expected time.Time
}{
{"nonsense", time.Time{}},
@@ -827,7 +827,7 @@ func TestFloat32(t *testing.T) {
t.Fatal("Unable to query results:", err)
}
- var id interface{}
+ var id any
if err := rows.Scan(&id); err != nil {
t.Fatal("Unable to scan results:", err)
}
@@ -854,7 +854,7 @@ func TestNull(t *testing.T) {
t.Fatal("Unable to query results:", err)
}
- var v interface{}
+ var v any
if err := rows.Scan(&v); err != nil {
t.Fatal("Unable to scan results:", err)
}
@@ -998,7 +998,7 @@ func TestTimezoneConversion(t *testing.T) {
timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC)
timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC)
tests := []struct {
- value interface{}
+ value any
expected time.Time
}{
{"nonsense", time.Time{}.In(loc)},
@@ -1128,7 +1128,7 @@ func TestQueryer(t *testing.T) {
if err != nil {
t.Error("Failed to db.Query:", err)
}
- if id != n + 1 {
+ if id != n+1 {
t.Error("Failed to db.Query: not matched results")
}
n = n + 1
@@ -1291,7 +1291,7 @@ const CurrentTimeStamp = "2006-01-02 15:04:05"
type TimeStamp struct{ *time.Time }
-func (t TimeStamp) Scan(value interface{}) error {
+func (t TimeStamp) Scan(value any) error {
var err error
switch v := value.(type) {
case string:
@@ -1335,7 +1335,7 @@ func TestFunctionRegistration(t *testing.T) {
regex := func(re, s string) (bool, error) {
return regexp.MatchString(re, s)
}
- generic := func(a interface{}) int64 {
+ generic := func(a any) int64 {
switch a.(type) {
case int64:
return 1
@@ -1356,7 +1356,7 @@ func TestFunctionRegistration(t *testing.T) {
}
return ret
}
- variadicGeneric := func(a ...interface{}) int64 {
+ variadicGeneric := func(a ...any) int64 {
return int64(len(a))
}
@@ -1406,7 +1406,7 @@ func TestFunctionRegistration(t *testing.T) {
ops := []struct {
query string
- expected interface{}
+ expected any
}{
{"SELECT addi8_16_32(1,2)", int32(3)},
{"SELECT addi64(1,2)", int64(3)},
@@ -1497,28 +1497,28 @@ func TestAggregatorRegistration(t *testing.T) {
}
type mode struct {
- counts map[interface{}]int
- top interface{}
- topCount int
+ counts map[any]int
+ top any
+ topCount int
}
func newMode() *mode {
- return &mode{
- counts: map[interface{}]int{},
- }
+ return &mode{
+ counts: map[any]int{},
+ }
}
-func (m *mode) Step(x interface{}) {
- m.counts[x]++
- c := m.counts[x]
- if c > m.topCount {
- m.top = x
- m.topCount = c
- }
+func (m *mode) Step(x any) {
+ m.counts[x]++
+ c := m.counts[x]
+ if c > m.topCount {
+ m.top = x
+ m.topCount = c
+ }
}
-func (m *mode) Done() interface{} {
- return m.top
+func (m *mode) Done() any {
+ return m.top
}
func TestAggregatorRegistration_GenericReturn(t *testing.T) {
@@ -1534,19 +1534,19 @@ func TestAggregatorRegistration_GenericReturn(t *testing.T) {
defer db.Close()
_, err = db.Exec("create table foo (department integer, profits integer)")
- if err != nil {
- t.Fatal("Failed to create table:", err)
- }
- _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115), (2, 20)")
- if err != nil {
- t.Fatal("Failed to insert records:", err)
- }
+ if err != nil {
+ t.Fatal("Failed to create table:", err)
+ }
+ _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115), (2, 20)")
+ if err != nil {
+ t.Fatal("Failed to insert records:", err)
+ }
var mode int
- err = db.QueryRow("select mode(profits) from foo").Scan(&mode)
- if err != nil {
- t.Fatal("MODE query error:", err)
- }
+ err = db.QueryRow("select mode(profits) from foo").Scan(&mode)
+ if err != nil {
+ t.Fatal("MODE query error:", err)
+ }
if mode != 20 {
t.Fatal("Got incorrect mode. Wanted 20, got: ", mode)
@@ -1871,7 +1871,7 @@ func TestNonColumnString(t *testing.T) {
}
defer db.Close()
- var x interface{}
+ var x any
if err := db.QueryRow("SELECT 'hello'").Scan(&x); err != nil {
t.Fatal(err)
}
@@ -2113,7 +2113,7 @@ var benchmarks = []testing.InternalBenchmark{
{Name: "BenchmarkStmtRows", F: benchmarkStmtRows},
}
-func (db *TestDB) mustExec(sql string, args ...interface{}) sql.Result {
+func (db *TestDB) mustExec(sql string, args ...any) sql.Result {
res, err := db.Exec(sql, args...)
if err != nil {
db.Fatalf("Error running %q: %v", sql, err)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_trace.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_trace.go
index 56bb914..6c47cce 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_trace.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_trace.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build sqlite_trace || trace
// +build sqlite_trace trace
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_type.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_type.go
index 0fd8210..20537a0 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_type.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_type.go
@@ -74,7 +74,7 @@ func scanType(cdt string) reflect.Type {
case SQLITE_TIME:
return reflect.TypeOf(sql.NullTime{})
}
- return reflect.TypeOf(new(interface{}))
+ return reflect.TypeOf(new(any))
}
func databaseTypeConvSqlite(t string) int {
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_usleep_windows.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_usleep_windows.go
index b6739bf..6527f6f 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_usleep_windows.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_usleep_windows.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build cgo
// +build cgo
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_windows.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_windows.go
index 81aa2ab..f863bcd 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3_windows.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3_windows.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build windows
// +build windows
package sqlite3
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3ext.h b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3ext.h
index 819e2e3..935437b 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/sqlite3ext.h
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/sqlite3ext.h
@@ -366,6 +366,11 @@ struct sqlite3_api_routines {
int (*value_encoding)(sqlite3_value*);
/* Version 3.41.0 and later */
int (*is_interrupted)(sqlite3*);
+ /* Version 3.43.0 and later */
+ int (*stmt_explain)(sqlite3_stmt*,int);
+ /* Version 3.44.0 and later */
+ void *(*get_clientdata)(sqlite3*,const char*);
+ int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*));
};
/*
@@ -694,6 +699,11 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_value_encoding sqlite3_api->value_encoding
/* Version 3.41.0 and later */
#define sqlite3_is_interrupted sqlite3_api->is_interrupted
+/* Version 3.43.0 and later */
+#define sqlite3_stmt_explain sqlite3_api->stmt_explain
+/* Version 3.44.0 and later */
+#define sqlite3_get_clientdata sqlite3_api->get_clientdata
+#define sqlite3_set_clientdata sqlite3_api->set_clientdata
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
diff --git a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/static_mock.go b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/static_mock.go
index f19e842..d2c5a27 100644
--- a/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.17/static_mock.go
+++ b/dependencies/pkg/mod/github.com/mattn/go-sqlite3@v1.14.22/static_mock.go
@@ -3,6 +3,7 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+//go:build !cgo
// +build !cgo
package sqlite3
@@ -28,10 +29,10 @@ type (
)
func (SQLiteDriver) Open(s string) (driver.Conn, error) { return nil, errorMsg }
-func (c *SQLiteConn) RegisterAggregator(string, interface{}, bool) error { return errorMsg }
+func (c *SQLiteConn) RegisterAggregator(string, any, bool) error { return errorMsg }
func (c *SQLiteConn) RegisterAuthorizer(func(int, string, string, string) int) {}
func (c *SQLiteConn) RegisterCollation(string, func(string, string) int) error { return errorMsg }
func (c *SQLiteConn) RegisterCommitHook(func() int) {}
-func (c *SQLiteConn) RegisterFunc(string, interface{}, bool) error { return errorMsg }
+func (c *SQLiteConn) RegisterFunc(string, any, bool) error { return errorMsg }
func (c *SQLiteConn) RegisterRollbackHook(func()) {}
func (c *SQLiteConn) RegisterUpdateHook(func(int, string, string, int64)) {}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS
new file mode 100644
index 0000000..1af2323
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/CODEOWNERS
@@ -0,0 +1 @@
+doctests/* @dmaier-redislabs
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/FUNDING.yml
index 707670d..707670d 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/FUNDING.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/FUNDING.yml
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/bug_report.md
index 3f934f8..3f934f8 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/bug_report.md
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/config.yml
index e86d7a6..e86d7a6 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/ISSUE_TEMPLATE/config.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/ISSUE_TEMPLATE/config.yml
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/dependabot.yml
index 77b7be5..77b7be5 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/dependabot.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/dependabot.yml
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml
new file mode 100644
index 0000000..9ccb28a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/release-drafter-config.yml
@@ -0,0 +1,48 @@
+name-template: '$NEXT_MINOR_VERSION'
+tag-template: 'v$NEXT_MINOR_VERSION'
+autolabeler:
+ - label: 'maintenance'
+ files:
+ - '*.md'
+ - '.github/*'
+ - label: 'bug'
+ branch:
+ - '/bug-.+'
+ - label: 'maintenance'
+ branch:
+ - '/maintenance-.+'
+ - label: 'feature'
+ branch:
+ - '/feature-.+'
+categories:
+ - title: 'Breaking Changes'
+ labels:
+ - 'breakingchange'
+ - title: '🧪 Experimental Features'
+ labels:
+ - 'experimental'
+ - title: '🚀 New Features'
+ labels:
+ - 'feature'
+ - 'enhancement'
+ - title: '🐛 Bug Fixes'
+ labels:
+ - 'fix'
+ - 'bugfix'
+ - 'bug'
+ - 'BUG'
+ - title: '🧰 Maintenance'
+ label: 'maintenance'
+change-template: '- $TITLE (#$NUMBER)'
+exclude-labels:
+ - 'skip-changelog'
+template: |
+ # Changes
+
+ $CHANGES
+
+ ## Contributors
+ We'd like to thank all the contributors who worked on this release!
+
+ $CONTRIBUTORS
+
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml
new file mode 100644
index 0000000..b8ca6cc
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/spellcheck-settings.yml
@@ -0,0 +1,29 @@
+matrix:
+- name: Markdown
+ expect_match: false
+ apsell:
+ lang: en
+ d: en_US
+ ignore-case: true
+ dictionary:
+ wordlists:
+ - .github/wordlist.txt
+ output: wordlist.dic
+ pipeline:
+ - pyspelling.filters.markdown:
+ markdown_extensions:
+ - markdown.extensions.extra:
+ - pyspelling.filters.html:
+ comments: false
+ attributes:
+ - alt
+ ignores:
+ - ':matches(code, pre)'
+ - code
+ - pre
+ - blockquote
+ - img
+ sources:
+ - 'README.md'
+ - 'FAQ.md'
+ - 'docs/**'
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt
new file mode 100644
index 0000000..52fdc1b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/wordlist.txt
@@ -0,0 +1,60 @@
+ACLs
+autoload
+autoloader
+autoloading
+analytics
+Autoloading
+backend
+backends
+behaviour
+CAS
+ClickHouse
+config
+customizable
+Customizable
+dataset
+de
+DisableIdentity
+ElastiCache
+extensibility
+FPM
+Golang
+IANA
+keyspace
+keyspaces
+Kvrocks
+localhost
+Lua
+MSSQL
+namespace
+NoSQL
+ORM
+Packagist
+PhpRedis
+pipelining
+pluggable
+Predis
+PSR
+Quickstart
+README
+rebalanced
+rebalancing
+redis
+Redis
+RocksDB
+runtime
+SHA
+sharding
+SETNAME
+SSL
+struct
+stunnel
+TCP
+TLS
+uri
+URI
+url
+variadic
+RedisStack
+RedisGears
+RedisTimeseries \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/build.yml
index a574e2e..e788016 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.github/workflows/build.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/build.yml
@@ -2,9 +2,12 @@ name: Go
on:
push:
- branches: [master]
+ branches: [master, v9]
pull_request:
- branches: [master]
+ branches: [master, v9]
+
+permissions:
+ contents: read
jobs:
build:
@@ -13,11 +16,11 @@ jobs:
strategy:
fail-fast: false
matrix:
- go-version: [1.16.x, 1.17.x]
+ go-version: [1.20.x, 1.21.x]
services:
redis:
- image: redis
+ image: redis/redis-stack-server:edge
options: >-
--health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
ports:
@@ -25,12 +28,12 @@ jobs:
steps:
- name: Set up ${{ matrix.go-version }}
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Test
run: make test
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml
new file mode 100644
index 0000000..6e49e64
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/doctests.yaml
@@ -0,0 +1,41 @@
+name: Documentation Tests
+
+on:
+ push:
+ branches: [master, examples]
+ pull_request:
+ branches: [master, examples]
+
+permissions:
+ contents: read
+
+jobs:
+ doctests:
+ name: doctests
+ runs-on: ubuntu-latest
+
+ services:
+ redis-stack:
+ image: redis/redis-stack-server:latest
+ options: >-
+ --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
+ ports:
+ - 6379:6379
+
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: [ "1.18", "1.19", "1.20", "1.21" ]
+
+ steps:
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Test doc examples
+ working-directory: ./doctests
+ run: go test
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..a139f5d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/golangci-lint.yml
@@ -0,0 +1,26 @@
+name: golangci-lint
+
+on:
+ push:
+ tags:
+ - v*
+ branches:
+ - master
+ - main
+ - v9
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ golangci:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v4
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml
new file mode 100644
index 0000000..6695abf
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/release-drafter.yml
@@ -0,0 +1,24 @@
+name: Release Drafter
+
+on:
+ push:
+ # branches to consider in the event; optional, defaults to all
+ branches:
+ - master
+
+permissions: {}
+jobs:
+ update_release_draft:
+ permissions:
+ pull-requests: write # to add label to PR (release-drafter/release-drafter)
+ contents: write # to create a github release (release-drafter/release-drafter)
+
+ runs-on: ubuntu-latest
+ steps:
+ # Drafts your next Release notes as Pull Requests are merged into "master"
+ - uses: release-drafter/release-drafter@v6
+ with:
+ # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
+ config-name: release-drafter-config.yml
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml
new file mode 100644
index 0000000..f739a54
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/spellcheck.yml
@@ -0,0 +1,14 @@
+name: spellcheck
+on:
+ pull_request:
+jobs:
+ check-spelling:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Check Spelling
+ uses: rojopolis/spellcheck-github-actions@0.36.0
+ with:
+ config_path: .github/spellcheck-settings.yml
+ task_name: Markdown
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml
new file mode 100644
index 0000000..445af1c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/stale-issues.yml
@@ -0,0 +1,25 @@
+name: "Close stale issues"
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions: {}
+jobs:
+ stale:
+ permissions:
+ issues: write # to close stale issues (actions/stale)
+ pull-requests: write # to close stale PRs (actions/stale)
+
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'This issue is marked stale. It will be closed in 30 days if it is not updated.'
+ stale-pr-message: 'This pull request is marked stale. It will be closed in 30 days if it is not updated.'
+ days-before-stale: 365
+ days-before-close: 30
+ stale-issue-label: "Stale"
+ stale-pr-label: "Stale"
+ operations-per-run: 10
+ remove-stale-when-updated: true
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml
new file mode 100644
index 0000000..82946dd
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.github/workflows/test-redis-enterprise.yml
@@ -0,0 +1,59 @@
+name: RE Tests
+
+on:
+ push:
+ branches: [master]
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ name: build
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ go-version: [1.21.x]
+ re-build: ["7.2.4-108"]
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Clone Redis EE docker repository
+ uses: actions/checkout@v4
+ with:
+ repository: RedisLabs/redis-ee-docker
+ path: redis-ee
+
+ - name: Set up ${{ matrix.go-version }}
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.go-version }}
+
+ - name: Build cluster
+ working-directory: redis-ee
+ env:
+ IMAGE: "redislabs/redis-internal:${{ matrix.re-build }}"
+ RE_USERNAME: ${{ secrets.RE_USERNAME }}
+ RE_PASS: ${{ secrets.RE_PASS }}
+ RE_CLUSTER_NAME: ${{ secrets.RE_CLUSTER_NAME }}
+ RE_USE_OSS_CLUSTER: false
+ RE_DB_PORT: ${{ secrets.RE_DB_PORT }}
+ DOCKER_ACCESS_TOKEN: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ run: ./build.sh
+
+ - name: Test
+ env:
+ RE_CLUSTER: "1"
+ run: |
+ go test \
+ --ginkgo.skip-file="ring_test.go" \
+ --ginkgo.skip-file="sentinel_test.go" \
+ --ginkgo.skip-file="osscluster_test.go" \
+ --ginkgo.skip-file="pubsub_test.go" \
+ --ginkgo.skip-file="gears_commands_test.go" \
+ --ginkgo.label-filter='!NonRedisEnterprise'
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore
new file mode 100644
index 0000000..6f86889
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.gitignore
@@ -0,0 +1,6 @@
+*.rdb
+testdata/*
+.idea/
+.DS_Store
+*.tar.gz
+*.dic \ No newline at end of file
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.golangci.yml
index de51455..de51455 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.golangci.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.golangci.yml
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.prettierrc.yml
index 8b7f044..8b7f044 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/.prettierrc.yml
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/.prettierrc.yml
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md
new file mode 100644
index 0000000..297438a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CHANGELOG.md
@@ -0,0 +1,124 @@
+## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
+
+
+### Features
+
+* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
+* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
+* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
+
+
+
+## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
+
+
+### Bug Fixes
+
+* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
+
+
+### Features
+
+* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
+* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
+* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
+
+
+
+## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
+
+### New Features
+
+- feat(scan): scan time.Time sets the default decoding (#2413)
+- Add support for CLUSTER LINKS command (#2504)
+- Add support for acl dryrun command (#2502)
+- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
+- Add support for LCS Command (#2480)
+- Add support for BZMPOP (#2456)
+- Adding support for ZMPOP command (#2408)
+- Add support for LMPOP (#2440)
+- feat: remove pool unused fields (#2438)
+- Expiretime and PExpireTime (#2426)
+- Implement `FUNCTION` group of commands (#2475)
+- feat(zadd): add ZAddLT and ZAddGT (#2429)
+- Add: Support for COMMAND LIST command (#2491)
+- Add support for BLMPOP (#2442)
+- feat: check pipeline.Do to prevent confusion with Exec (#2517)
+- Function stats, function kill, fcall and fcall_ro (#2486)
+- feat: Add support for CLUSTER SHARDS command (#2507)
+- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
+
+### Fixed
+
+- fix: eval api cmd.SetFirstKeyPos (#2501)
+- fix: limit the number of connections created (#2441)
+- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479)
+- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
+- fix: group lag can be null (#2448)
+
+### Maintenance
+
+- Updating to the latest version of redis (#2508)
+- Allowing for running tests on a port other than the fixed 6380 (#2466)
+- redis 7.0.8 in tests (#2450)
+- docs: Update redisotel example for v9 (#2425)
+- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
+- chore: add Chinese translation (#2436)
+- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
+- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
+- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
+- docs: add instructions for the HSet api (#2503)
+- docs: add reading lag field comment (#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+ contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+ and deadlines. See
+ [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+ `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.IstrumentMetrics`. See
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
+- Added `redis.HasErrorPrefix` to help working with errors.
+
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+ completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+ [example](example/otel) and
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+ an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+ it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+ reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usually, added support for more commands and fixed some bugs.
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md
new file mode 100644
index 0000000..90030b8
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/CONTRIBUTING.md
@@ -0,0 +1,101 @@
+# Contributing
+
+## Introduction
+
+We appreciate your interest in considering contributing to go-redis.
+Community contributions mean a lot to us.
+
+## Contributions we need
+
+You may already know how you'd like to contribute, whether it's a fix for a bug you
+encountered, or a new feature your team wants to use.
+
+If you don't know where to start, consider improving
+documentation, bug triaging, and writing tutorials are all examples of
+helpful contributions that mean less work for you.
+
+## Your First Contribution
+
+Unsure where to begin contributing? You can start by looking through
+[help-wanted
+issues](https://github.com/redis/go-redis/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted).
+
+Never contributed to open source before? Here are a couple of friendly
+tutorials:
+
+- <http://makeapullrequest.com/>
+- <http://www.firsttimersonly.com/>
+
+## Getting Started
+
+Here's how to get started with your code contribution:
+
+1. Create your own fork of go-redis
+2. Do the changes in your fork
+3. If you need a development environment, run `make test`. Note: this clones and builds the latest release of [redis](https://redis.io). You also need a redis-stack-server docker, in order to run the capabilities tests. This can be started by running:
+ ```docker run -p 6379:6379 -it redis/redis-stack-server:edge```
+4. While developing, make sure the tests pass by running `make tests`
+5. If you like the change and think the project could use it, send a
+ pull request
+
+To see what else is part of the automation, run `invoke -l`
+
+## Testing
+
+Call `make test` to run all tests, including linters.
+
+Continuous Integration uses these same wrappers to run all of these
+tests against multiple versions of python. Feel free to test your
+changes against all the go versions supported, as declared by the
+[build.yml](./.github/workflows/build.yml) file.
+
+### Troubleshooting
+
+If you get any errors when running `make test`, make sure
+that you are using supported versions of Docker and go.
+
+## How to Report a Bug
+
+### Security Vulnerabilities
+
+**NOTE**: If you find a security vulnerability, do NOT open an issue.
+Email [Redis Open Source (<oss@redis.com>)](mailto:oss@redis.com) instead.
+
+In order to determine whether you are dealing with a security issue, ask
+yourself these two questions:
+
+- Can I access something that's not mine, or something I shouldn't
+ have access to?
+- Can I disable something for other people?
+
+If the answer to either of those two questions are *yes*, then you're
+probably dealing with a security issue. Note that even if you answer
+*no* to both questions, you may still be dealing with a security
+issue, so if you're unsure, just email [us](mailto:oss@redis.com).
+
+### Everything Else
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of go-redis are you using?
+2. What version of redis are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## Suggest a feature or enhancement
+
+If you'd like to contribute a new feature, make sure you check our
+issue list to see if someone has already proposed it. Work may already
+be underway on the feature you want or we may have rejected a
+feature like it already.
+
+If you don't see anything, open a new issue that describes the feature
+you would like and how it should work.
+
+## Code review process
+
+The core team regularly looks at pull requests. We will provide
+feedback as soon as possible. After receiving our feedback, please respond
+within two weeks. After that time, we may close your PR if it isn't
+showing any activity.
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/LICENSE
index 298bed9..f4967db 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/LICENSE
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013 The github.com/go-redis/redis Authors.
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile
new file mode 100644
index 0000000..dc2fe78
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/Makefile
@@ -0,0 +1,44 @@
+GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go test in $${dir}"; \
+ (cd "$${dir}" && \
+ go mod tidy -compat=1.18 && \
+ go test && \
+ go test ./... -short -race && \
+ go test ./... -run=NONE -bench=. -benchmem && \
+ env GOOS=linux GOARCH=386 go test && \
+ go vet); \
+ done
+ cd internal/customvet && go build .
+ go vet -vettool ./internal/customvet/customvet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench fmt
+
+build:
+ go build .
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://download.redis.io/releases/redis-7.2.1.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
+
+fmt:
+ gofumpt -w ./
+ goimports -w -local github.com/redis/go-redis ./
+
+go_mod_tidy:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u ./... && \
+ go mod tidy -compat=1.18); \
+ done
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md
new file mode 100644
index 0000000..043d3f0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/README.md
@@ -0,0 +1,274 @@
+# Redis client for Go
+
+[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
+> use it to monitor applications and set up automatic alerts to receive notifications via email,
+> Slack, Telegram, and others.
+>
+> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which
+> demonstrates how you can use Uptrace to monitor go-redis.
+
+## How do I Redis?
+
+[Learn for free at Redis University](https://university.redis.com/)
+
+[Build faster with the Redis Launchpad](https://launchpad.redis.com/)
+
+[Try the Redis Cloud](https://redis.com/try-free/)
+
+[Dive in developer tutorials](https://developer.redis.com/)
+
+[Join the Redis community](https://redis.com/community/)
+
+[Work at Redis](https://redis.com/company/careers/jobs/)
+
+## Documentation
+
+- [English](https://redis.uptrace.dev)
+- [简体中文](https://redis.uptrace.dev/zh/)
+
+## Resources
+
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/rWtp5Aj)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol.
+
+## Features
+
+- Redis commands except QUIT and SYNC.
+- Automatic connection pooling.
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
+- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+
+## Installation
+
+go-redis supports 2 last Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis/**v9**:
+
+```shell
+go get github.com/redis/go-redis/v9
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+The above can be modified to specify the version of the RESP protocol by adding the `protocol`
+option to the `Options` struct:
+
+```go
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+ })
+
+```
+
+### Connecting via a redis url
+
+go-redis also supports connecting via the
+[redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt).
+The example below demonstrates how the connection can easily be configured using a string, adhering
+to this specification.
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient() *redis.Client {
+ url := "redis://user:password@localhost:6379/0?protocol=3"
+ opts, err := redis.ParseURL(url)
+ if err != nil {
+ panic(err)
+ }
+
+ return redis.NewClient(opts)
+}
+
+```
+
+
+### Advanced Configuration
+
+go-redis supports extending the client identification phase to allow projects to send their own custom client identification.
+
+#### Default Client Identification
+
+By default, go-redis automatically sends the client library name and version during the connection process. This feature is available in redis-server as of version 7.2. As a result, the command is "fire and forget", meaning it should fail silently, in the case that the redis server does not support this feature.
+
+#### Disabling Identity Verification
+
+When connection identity verification is not required or needs to be explicitly disabled, a `DisableIndentity` configuration option exists. In V10 of this library, `DisableIndentity` will become `DisableIdentity` in order to fix the associated typo.
+
+To disable verification, set the `DisableIndentity` option to `true` in the Redis client options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "",
+ DB: 0,
+ DisableIndentity: true, // Disable set-info on connect
+})
+```
+
+## Contributing
+
+Please see [out contributing guidelines](CONTRIBUTING.md) to help us improve this library!
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3}
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of redis-server bin file and redis config file are defined in `main_test.go`:
+
+```go
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```shell
+go test
+```
+
+Another option is to run your specific tests with an already running redis. The example below, tests
+against a redis running on port 9999.:
+
+```shell
+REDIS_PORT=9999 go test <your options>
+```
+
+## See also
+
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/redis/go-redis/graphs/contributors">
+ <img src="https://contributors-img.web.app/image?repo=redis/go-redis" />
+</a>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/RELEASING.md
index 1115db4..1115db4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/RELEASING.md
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/RELEASING.md
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go
new file mode 100644
index 0000000..06847be
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/acl_commands.go
@@ -0,0 +1,35 @@
+package redis
+
+import "context"
+
+type ACLCmdable interface {
+ ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+ ACLLog(ctx context.Context, count int64) *ACLLogCmd
+ ACLLogReset(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd {
+ args := make([]interface{}, 0, 3+len(command))
+ args = append(args, "acl", "dryrun", username)
+ args = append(args, command...)
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLog(ctx context.Context, count int64) *ACLLogCmd {
+ args := make([]interface{}, 0, 3)
+ args = append(args, "acl", "log")
+ if count > 0 {
+ args = append(args, count)
+ }
+ cmd := NewACLLogCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "acl", "log", "reset")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_decode_test.go
index 8382806..16bdf2c 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_decode_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_decode_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/proto"
)
var ctx = context.TODO()
@@ -18,15 +18,19 @@ type ClientStub struct {
resp []byte
}
+var initHello = []byte("%1\r\n+proto\r\n:3\r\n")
+
func NewClientStub(resp []byte) *ClientStub {
stub := &ClientStub{
resp: resp,
}
+
stub.Cmdable = NewClient(&Options{
PoolSize: 128,
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
- return stub.stubConn(), nil
+ return stub.stubConn(initHello), nil
},
+ DisableIndentity: true,
})
return stub
}
@@ -38,10 +42,12 @@ func NewClusterClientStub(resp []byte) *ClientStub {
client := NewClusterClient(&ClusterOptions{
PoolSize: 128,
- Addrs: []string{"127.0.0.1:6379"},
+ Addrs: []string{":6379"},
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
- return stub.stubConn(), nil
+ return stub.stubConn(initHello), nil
},
+ DisableIndentity: true,
+
ClusterSlots: func(_ context.Context) ([]ClusterSlot, error) {
return []ClusterSlot{
{
@@ -53,30 +59,31 @@ func NewClusterClientStub(resp []byte) *ClientStub {
},
})
- // init command.
- tmpClient := NewClient(&Options{Addr: ":6379"})
- cmdsInfo, err := tmpClient.Command(ctx).Result()
- _ = tmpClient.Close()
- client.cmdsInfoCache = newCmdsInfoCache(func(_ context.Context) (map[string]*CommandInfo, error) {
- return cmdsInfo, err
- })
-
stub.Cmdable = client
return stub
}
-func (c *ClientStub) stubConn() *ConnStub {
+func (c *ClientStub) stubConn(init []byte) *ConnStub {
return &ConnStub{
+ init: init,
resp: c.resp,
}
}
type ConnStub struct {
+ init []byte
resp []byte
pos int
}
func (c *ConnStub) Read(b []byte) (n int, err error) {
+ // Return conn.init()
+ if len(c.init) > 0 {
+ n = copy(b, c.init)
+ c.init = c.init[n:]
+ return n, nil
+ }
+
if len(c.resp) == 0 {
return 0, io.EOF
}
@@ -106,7 +113,7 @@ func BenchmarkDecode(b *testing.B) {
}
benchmarks := []Benchmark{
- {"single", NewClientStub},
+ {"server", NewClientStub},
{"cluster", NewClusterClientStub},
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_test.go
index ba81ce8..8e23303 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/bench_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bench_test.go
@@ -10,7 +10,7 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
@@ -223,7 +223,7 @@ func BenchmarkZAdd(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- err := client.ZAdd(ctx, "key", &redis.Z{
+ err := client.ZAdd(ctx, "key", redis.Z{
Score: float64(1),
Member: "hello",
}).Err()
@@ -273,36 +273,6 @@ func BenchmarkXRead(b *testing.B) {
})
}
-var clientSink *redis.Client
-
-func BenchmarkWithContext(b *testing.B) {
- ctx := context.Background()
- rdb := benchmarkRedisClient(ctx, 10)
- defer rdb.Close()
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- clientSink = rdb.WithContext(ctx)
- }
-}
-
-var ringSink *redis.Ring
-
-func BenchmarkRingWithContext(b *testing.B) {
- ctx := context.Background()
- rdb := redis.NewRing(&redis.RingOptions{})
- defer rdb.Close()
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- ringSink = rdb.WithContext(ctx)
- }
-}
-
//------------------------------------------------------------------------------
func newClusterScenario() *clusterScenario {
@@ -396,17 +366,77 @@ func BenchmarkClusterSetString(b *testing.B) {
})
}
-var clusterSink *redis.ClusterClient
+func BenchmarkExecRingSetAddrsCmd(b *testing.B) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+ )
-func BenchmarkClusterWithContext(b *testing.B) {
- ctx := context.Background()
- rdb := redis.NewClusterClient(&redis.ClusterOptions{})
- defer rdb.Close()
+ for _, port := range []string{ringShard1Port, ringShard2Port} {
+ if _, err := startRedis(port); err != nil {
+ b.Fatal(err)
+ }
+ }
- b.ResetTimer()
- b.ReportAllocs()
+ b.Cleanup(func() {
+ for _, p := range processes {
+ if err := p.Close(); err != nil {
+ b.Errorf("Failed to stop redis process: %v", err)
+ }
+ }
+ processes = nil
+ })
+
+ ring := redis.NewRing(&redis.RingOptions{
+ Addrs: map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ },
+ NewClient: func(opt *redis.Options) *redis.Client {
+ // Simulate slow shard creation
+ time.Sleep(100 * time.Millisecond)
+ return redis.NewClient(opt)
+ },
+ })
+ defer ring.Close()
+ if _, err := ring.Ping(context.Background()).Result(); err != nil {
+ b.Fatal(err)
+ }
+
+ // Continuously update addresses by adding and removing one address
+ updatesDone := make(chan struct{})
+ defer func() { close(updatesDone) }()
+ go func() {
+ ticker := time.NewTicker(10 * time.Millisecond)
+ defer ticker.Stop()
+ for i := 0; ; i++ {
+ select {
+ case <-ticker.C:
+ if i%2 == 0 {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ })
+ } else {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ ringShard2Name: ":" + ringShard2Port,
+ })
+ }
+ case <-updatesDone:
+ return
+ }
+ }
+ }()
+
+ b.ResetTimer()
for i := 0; i < b.N; i++ {
- clusterSink = rdb.WithContext(ctx)
+ if _, err := ring.Ping(context.Background()).Result(); err != nil {
+ if err == redis.ErrClosed {
+ // The shard client could be closed while ping command is in progress
+ continue
+ } else {
+ b.Fatal(err)
+ }
+ }
}
}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go
new file mode 100644
index 0000000..d9fc50d
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands.go
@@ -0,0 +1,163 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type BitMapCmdable interface {
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd
+ BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+}
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+ Unit string // BYTE(default) | BIT
+}
+
+const BitCountIndexByte string = "BYTE"
+const BitCountIndexBit string = "BIT"
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ if bitCount.Unit == "" {
+ bitCount.Unit = "BYTE"
+ }
+ if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit {
+ cmd := NewIntCmd(ctx)
+ cmd.SetErr(errors.New("redis: invalid bitcount index"))
+ return cmd
+ }
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ string(bitCount.Unit),
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+// BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end
+// if you need the `byte | bit` parameter, please use `BitPosSpan`.
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitPosSpan supports the `byte | bit` parameters in redis version 7.0,
+// the bitpos command defaults to using byte type for the `start-end` range,
+// which means it counts in bytes from start to end. you can set the value
+// of "span" to determine the type of `start-end`.
+// span = "bit", cmd: bitpos key bit start end bit
+// span = "byte", cmd: bitpos key bit start end byte
+func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd {
+ cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitField accepts multiple values:
+// - BitField("set", "i1", "offset1", "value1","cmd2", "type2", "offset2", "value2")
+// - BitField([]string{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+// - BitField([]interface{}{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+func (c cmdable) BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "bitfield"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitFieldRO - Read-only variant of the BITFIELD command.
+// It is like the original BITFIELD but only accepts GET subcommand and can safely be used in read-only replicas.
+// - BitFieldRO(ctx, key, "<Encoding0>", "<Offset0>", "<Encoding1>","<Offset1>")
+func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "BITFIELD_RO"
+ args[1] = key
+ if len(values)%2 != 0 {
+ panic("BitFieldRO: invalid number of arguments, must be even")
+ }
+ for i := 0; i < len(values); i += 2 {
+ args = append(args, "GET", values[i], values[i+1])
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go
new file mode 100644
index 0000000..f3cc320
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/bitmap_commands_test.go
@@ -0,0 +1,98 @@
+package redis_test
+
+import (
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+ "github.com/redis/go-redis/v9"
+)
+
+type bitCountExpected struct {
+ Start int64
+ End int64
+ Expected int64
+}
+
+var _ = Describe("BitCountBite", func() {
+ var client *redis.Client
+ key := "bit_count_test"
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ values := []int{0, 1, 0, 0, 1, 0, 1, 0, 1, 1}
+ for i, v := range values {
+ cmd := client.SetBit(ctx, key, int64(i), v)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("bit count bite", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 0},
+ {0, 1, 1},
+ {0, 2, 1},
+ {0, 3, 1},
+ {0, 4, 2},
+ {0, 5, 2},
+ {0, 6, 3},
+ {0, 7, 3},
+ {0, 8, 4},
+ {0, 9, 5},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexBit})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+})
+
+var _ = Describe("BitCountByte", func() {
+ var client *redis.Client
+ key := "bit_count_test"
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ values := []int{0, 0, 0, 0, 0, 0, 0, 1, 1, 1}
+ for i, v := range values {
+ cmd := client.SetBit(ctx, key, int64(i), v)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("bit count byte", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 1},
+ {0, 1, 3},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexByte})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+
+ It("bit count byte with no unit specified", func() {
+ var expected = []bitCountExpected{
+ {0, 0, 1},
+ {0, 1, 3},
+ }
+
+ for _, e := range expected {
+ cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End})
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal(e.Expected))
+ }
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go
new file mode 100644
index 0000000..0caf097
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/cluster_commands.go
@@ -0,0 +1,192 @@
+package redis
+
+import "context"
+
+type ClusterCmdable interface {
+ ClusterMyShardID(ctx context.Context) *StringCmd
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterShards(ctx context.Context) *ClusterShardsCmd
+ ClusterLinks(ctx context.Context) *ClusterLinksCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myshardid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterShards(ctx context.Context) *ClusterShardsCmd {
+ cmd := NewClusterShardsCmd(ctx, "cluster", "shards")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterLinks(ctx context.Context) *ClusterLinksCmd {
+ cmd := NewClusterLinksCmd(ctx, "cluster", "links")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go
new file mode 100644
index 0000000..9fb9a83
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command.go
@@ -0,0 +1,5483 @@
+package redis
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type Cmder interface {
+ // command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster".
+ Name() string
+
+ // full command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info".
+ FullName() string
+
+ // all args of the command.
+ // e.g. "set k v ex 10" -> "[set k v ex 10]".
+ Args() []interface{}
+
+ // format request and response string.
+ // e.g. "set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v".
+ String() string
+
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha", "eval_ro", "evalsha_ro":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+ return 1
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return util.BytesToString(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case bool:
+ return val, nil
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched in the Redis struct fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadSlice()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadInt()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ second, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ microsecond, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val = time.Unix(second, microsecond*1000)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadBool()
+
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ if err == Nil {
+ cmd.val = false
+ err = nil
+ }
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloat()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloat(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValue struct {
+ Key string
+ Value string
+}
+
+type KeyValueSliceCmd struct {
+ baseCmd
+
+ val []KeyValue
+}
+
+var _ Cmder = (*KeyValueSliceCmd)(nil)
+
+func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+ return &KeyValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+ cmd.val = val
+}
+
+func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+ return cmd.val
+}
+
+func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Many commands will respond to two formats:
+// 1. 1) "one"
+// 2. (double) 1
+// 2. 1) "two"
+// 2. (double) 2
+//
+// OR:
+// 1. "two"
+// 2. (double) 2
+// 3. "one"
+// 4. (double) 1
+func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If the n is 0, can't continue reading.
+ if n == 0 {
+ cmd.val = make([]KeyValue, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]KeyValue, n)
+ } else {
+ cmd.val = make([]KeyValue, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadBool(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringStringCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*MapStringStringCmd)(nil)
+
+func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+ return &MapStringStringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched in the Redis struct fields by the `redis:"field"` tag.
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]string, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringIntCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*MapStringIntCmd)(nil)
+
+func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+ return &MapStringIntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *MapStringIntCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringIntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = nn
+ }
+ return nil
+}
+
+// ------------------------------------------------------------------------------
+type MapStringSliceInterfaceCmd struct {
+ baseCmd
+ val map[string][]interface{}
+}
+
+func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd {
+ return &MapStringSliceInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringSliceInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string][]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[k] = make([]interface{}, nn)
+ for j := 0; j < nn; j++ {
+ value, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[k][j] = value
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]struct{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ if msgs[i], err = readXMessage(rd); err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return XMessage{}, err
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ v, err := stringInterfaceMapParser(rd)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ }
+
+ return XMessage{
+ ID: id,
+ Values: v,
+ }, nil
+}
+
+func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ m := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ var n int
+ if typ == proto.RespMap {
+ n, err = rd.ReadMapLen()
+ } else {
+ n, err = rd.ReadArrayLen()
+ }
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if typ != proto.RespMap {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+ if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var err error
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+ cmd.val = &XPending{}
+
+ if cmd.val.Count, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val.Consumers = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XPendingExt, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+
+ if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ idle, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
+ if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle time.Duration
+ Inactive time.Duration
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for f := 0; f < nn; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ cmd.val[i].Name, err = rd.ReadString()
+ case "pending":
+ cmd.val[i].Pending, err = rd.ReadInt()
+ case "idle":
+ var idle int64
+ idle, err = rd.ReadInt()
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+ case "inactive":
+ var inactive int64
+ inactive, err = rd.ReadInt()
+ cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+ default:
+ return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ group := &cmd.val[i]
+
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for j := 0; j < nn; j++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "consumers":
+ group.Consumers, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "pending":
+ group.Pending, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ case "lag":
+ group.Lag, err = rd.ReadInt()
+
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ if err != nil && err != Nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ FirstEntry XMessage
+ LastEntry XMessage
+ RecordedFirstEntryID string
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = &XInfoStream{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "first-entry":
+ cmd.val.FirstEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "last-entry":
+ cmd.val.LastEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+ RecordedFirstEntryID string
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ ActiveTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ group := XInfoStreamGroup{}
+
+ for j := 0; j < nn; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "lag":
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ group.Lag, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "pel-count":
+ group.PelCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ if err != nil {
+ return nil, err
+ }
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < nn; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.UnixMilli(seen)
+ case "active-time":
+ active, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.ActiveTime = time.UnixMilli(active)
+ case "pel-count":
+ c.PelCount, err = rd.ReadInt()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM FULL reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If the n is 0, can't continue reading.
+ if n == 0 {
+ cmd.val = make([]Z, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return err
+ }
+ cmd.val = &ZWithKey{}
+
+ if cmd.val.Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cursor, err := rd.ReadUint()
+ if err != nil {
+ return err
+ }
+ cmd.cursor = cursor
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.page = make([]string, n)
+
+ for i := 0; i < len(cmd.page); i++ {
+ if cmd.page[i], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+ NetworkingMetadata map[string]string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterSlot, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ n, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n < 2 {
+ return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ }
+
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ // subtract start and end.
+ nodes := make([]ClusterNode, n-2)
+
+ for j := 0; j < len(nodes); j++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 2 || nn > 4 {
+ return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n)
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if nn >= 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nodes[j].ID = id
+ }
+
+ if nn >= 4 {
+ metadataLength, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ networkingMetadata := make(map[string]string, metadataLength)
+
+ for i := 0; i < metadataLength; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ networkingMetadata[key] = value
+ }
+
+ nodes[j].NetworkingMetadata = networkingMetadata
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+
+ // WithCoord+WithDist+WithGeoHash
+ withLen int
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ q.withLen++
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ q.withLen++
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ q.withLen++
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.locations = make([]GeoLocation, n)
+
+ for i := 0; i < len(cmd.locations); i++ {
+ // only name
+ if cmd.q.withLen == 0 {
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // +name
+ if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+ return err
+ }
+
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.q.WithDist {
+ if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithGeoHash {
+ if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+type GeoSearchQuery struct {
+ Member string
+
+ // Latitude and Longitude when using FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Distance and unit when using ByRadius option.
+ // Can use m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Height, width and unit when using ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ loc.Longitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*GeoPos, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ err = rd.ReadFixedArrayLen(2)
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return err
+ }
+
+ longitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ latitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+ const numArgRedis7 = 10
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string]*CommandInfo, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch nn {
+ case numArgRedis5, numArgRedis6, numArgRedis7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+ }
+
+ cmdInfo := &CommandInfo{}
+ if cmdInfo.Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ arity, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Arity = int8(arity)
+
+ flagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Flags = make([]string, flagLen)
+ for f := 0; f < len(cmdInfo.Flags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.Flags[f] = ""
+ case err != nil:
+ return err
+ default:
+ if !cmdInfo.ReadOnly && s == "readonly" {
+ cmdInfo.ReadOnly = true
+ }
+ cmdInfo.Flags[f] = s
+ }
+ }
+
+ firstKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.StepCount = int8(stepCount)
+
+ if nn >= numArgRedis6 {
+ aclFlagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.ACLFlags = make([]string, aclFlagLen)
+ for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.ACLFlags[f] = ""
+ case err != nil:
+ return err
+ default:
+ cmdInfo.ACLFlags[f] = s
+ }
+ }
+ }
+
+ if nn >= numArgRedis7 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ cmd.val[cmdInfo.Name] = cmdInfo
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These are also optional fields emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]SlowLog, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 4 {
+ return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+ }
+
+ if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ createdAt, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Time = time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if cmdLen < 1 {
+ return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ }
+
+ cmd.val[i].Args = make([]string, cmdLen)
+ for f := 0; f < len(cmd.val[i].Args); f++ {
+ cmd.val[i].Args[f], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if nn >= 5 {
+ if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ if nn >= 6 {
+ if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceCmd struct {
+ baseCmd
+
+ val map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceCmd)(nil)
+
+func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+ return &MapStringInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ cmd.val[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ cmd.val[k] = err
+ continue
+ }
+ return err
+ }
+ cmd.val[k] = v
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringStringSliceCmd struct {
+ baseCmd
+
+ val []map[string]string
+}
+
+var _ Cmder = (*MapStringStringSliceCmd)(nil)
+
+func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+ return &MapStringStringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]string, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]string, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ v, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceSliceCmd struct {
+ baseCmd
+
+ val []map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
+
+func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+ return &MapStringInterfaceSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]interface{}, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err != Nil {
+ return err
+ }
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValuesCmd struct {
+ baseCmd
+
+ key string
+ val []string
+}
+
+var _ Cmder = (*KeyValuesCmd)(nil)
+
+func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+ return &KeyValuesCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *KeyValuesCmd) Val() (string, []string) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *KeyValuesCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceWithKeyCmd struct {
+ baseCmd
+
+ key string
+ val []Z
+}
+
+var _ Cmder = (*ZSliceWithKeyCmd)(nil)
+
+func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+ return &ZSliceWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *ZSliceWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type Function struct {
+ Name string
+ Description string
+ Flags []string
+}
+
+type Library struct {
+ Name string
+ Engine string
+ Functions []Function
+ Code string
+}
+
+type FunctionListCmd struct {
+ baseCmd
+
+ val []Library
+}
+
+var _ Cmder = (*FunctionListCmd)(nil)
+
+func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+ return &FunctionListCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionListCmd) SetVal(val []Library) {
+ cmd.val = val
+}
+
+func (cmd *FunctionListCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionListCmd) Val() []Library {
+ return cmd.val
+}
+
+func (cmd *FunctionListCmd) Result() ([]Library, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionListCmd) First() (*Library, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ if len(cmd.val) > 0 {
+ return &cmd.val[0], nil
+ }
+ return nil, Nil
+}
+
+func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ libraries := make([]Library, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ library := Library{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "library_name":
+ library.Name, err = rd.ReadString()
+ case "engine":
+ library.Engine, err = rd.ReadString()
+ case "functions":
+ library.Functions, err = cmd.readFunctions(rd)
+ case "library_code":
+ library.Code, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ libraries[i] = library
+ }
+ cmd.val = libraries
+ return nil
+}
+
+func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ functions := make([]Function, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function := Function{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ if function.Name, err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ case "description":
+ if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+ return nil, err
+ }
+ case "flags":
+ // resp set
+ nx, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function.Flags = make([]string, nx)
+ for j := 0; j < nx; j++ {
+ if function.Flags[j], err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+ }
+
+ functions[i] = function
+ }
+ return functions, nil
+}
+
+// FunctionStats contains information about the scripts currently executing on the server, and the available engines
+// - Engines:
+// Statistics about the engine like number of functions and number of libraries
+// - RunningScript:
+// The script currently running on the shard we're connecting to.
+// For Redis Enterprise and Redis Cloud, this represents the
+// function with the longest running time, across all the running functions, on all shards
+// - RunningScripts
+// All scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+type FunctionStats struct {
+ Engines []Engine
+ isRunning bool
+ rs RunningScript
+ allrs []RunningScript
+}
+
+func (fs *FunctionStats) Running() bool {
+ return fs.isRunning
+}
+
+func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+ return fs.rs, fs.isRunning
+}
+
+// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+ return fs.allrs
+}
+
+type RunningScript struct {
+ Name string
+ Command []string
+ Duration time.Duration
+}
+
+type Engine struct {
+ Language string
+ LibrariesCount int64
+ FunctionsCount int64
+}
+
+type FunctionStatsCmd struct {
+ baseCmd
+ val FunctionStats
+}
+
+var _ Cmder = (*FunctionStatsCmd)(nil)
+
+func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+ return &FunctionStatsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+ cmd.val = val
+}
+
+func (cmd *FunctionStatsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionStatsCmd) Val() FunctionStats {
+ return cmd.val
+}
+
+func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result FunctionStats
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "running_script":
+ result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+ case "engines":
+ result.Engines, err = cmd.readEngines(rd)
+ case "all_running_scripts": // Redis Enterprise only
+ result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+ default:
+ return fmt.Errorf("redis: function stats unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+ err := rd.ReadFixedMapLen(3)
+ if err != nil {
+ if err == Nil {
+ return RunningScript{}, false, nil
+ }
+ return RunningScript{}, false, err
+ }
+
+ var runningScript RunningScript
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+
+ switch key {
+ case "name":
+ runningScript.Name, err = rd.ReadString()
+ case "duration_ms":
+ runningScript.Duration, err = cmd.readDuration(rd)
+ case "command":
+ runningScript.Command, err = cmd.readCommand(rd)
+ default:
+ return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+ }
+
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+ }
+
+ return runningScript, true, nil
+}
+
+func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ engines := make([]Engine, 0, n)
+ for i := 0; i < n; i++ {
+ engine := Engine{}
+ engine.Language, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ err = rd.ReadFixedMapLen(2)
+ if err != nil {
+ return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+ }
+
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ switch key {
+ case "libraries_count":
+ engine.LibrariesCount, err = rd.ReadInt()
+ case "functions_count":
+ engine.FunctionsCount, err = rd.ReadInt()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ engines = append(engines, engine)
+ }
+ return engines, nil
+}
+
+func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+ t, err := rd.ReadInt()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ command := make([]string, 0, n)
+ for i := 0; i < n; i++ {
+ x, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ command = append(command, x)
+ }
+
+ return command, nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, false, err
+ }
+
+ runningScripts := make([]RunningScript, 0, n)
+ for i := 0; i < n; i++ {
+ rs, _, err := cmd.readRunningScript(rd)
+ if err != nil {
+ return nil, false, err
+ }
+ runningScripts = append(runningScripts, rs)
+ }
+
+ return runningScripts, len(runningScripts) > 0, nil
+}
+
+//------------------------------------------------------------------------------
+
+// LCSQuery is a parameter used for the LCS command
+type LCSQuery struct {
+ Key1 string
+ Key2 string
+ Len bool
+ Idx bool
+ MinMatchLen int
+ WithMatchLen bool
+}
+
+// LCSMatch is the result set of the LCS command.
+type LCSMatch struct {
+ MatchString string
+ Matches []LCSMatchedPosition
+ Len int64
+}
+
+type LCSMatchedPosition struct {
+ Key1 LCSPosition
+ Key2 LCSPosition
+
+ // only for withMatchLen is true
+ MatchLen int64
+}
+
+type LCSPosition struct {
+ Start int64
+ End int64
+}
+
+type LCSCmd struct {
+ baseCmd
+
+ // 1: match string
+ // 2: match len
+ // 3: match idx LCSMatch
+ readType uint8
+ val *LCSMatch
+}
+
+func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+ args := make([]interface{}, 3, 7)
+ args[0] = "lcs"
+ args[1] = q.Key1
+ args[2] = q.Key2
+
+ cmd := &LCSCmd{readType: 1}
+ if q.Len {
+ cmd.readType = 2
+ args = append(args, "len")
+ } else if q.Idx {
+ cmd.readType = 3
+ args = append(args, "idx")
+ if q.MinMatchLen != 0 {
+ args = append(args, "minmatchlen", q.MinMatchLen)
+ }
+ if q.WithMatchLen {
+ args = append(args, "withmatchlen")
+ }
+ }
+ cmd.baseCmd = baseCmd{
+ ctx: ctx,
+ args: args,
+ }
+
+ return cmd
+}
+
+func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+ cmd.val = val
+}
+
+func (cmd *LCSCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *LCSCmd) Val() *LCSMatch {
+ return cmd.val
+}
+
+func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+ lcs := &LCSMatch{}
+ switch cmd.readType {
+ case 1:
+ // match string
+ if lcs.MatchString, err = rd.ReadString(); err != nil {
+ return err
+ }
+ case 2:
+ // match len
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ case 3:
+ // read LCSMatch
+ if err = rd.ReadFixedMapLen(2); err != nil {
+ return err
+ }
+
+ // read matches or len field
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "matches":
+ // read array of matched positions
+ if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+ return err
+ }
+ case "len":
+ // read match length
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ cmd.val = lcs
+ return nil
+}
+
+func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ positions := make([]LCSMatchedPosition, n)
+ for i := 0; i < n; i++ {
+ pn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+ if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+
+ // read match length if WithMatchLen is true
+ if pn > 2 {
+ if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return positions, nil
+}
+
+func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return pos, err
+ }
+ if pos.Start, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+ if pos.End, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+
+ return pos, nil
+}
+
+// ------------------------------------------------------------------------
+
+type KeyFlags struct {
+ Key string
+ Flags []string
+}
+
+type KeyFlagsCmd struct {
+ baseCmd
+
+ val []KeyFlags
+}
+
+var _ Cmder = (*KeyFlagsCmd)(nil)
+
+func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+ return &KeyFlagsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+ cmd.val = val
+}
+
+func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+ return cmd.val
+}
+
+func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyFlagsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ cmd.val = make([]KeyFlags, 0)
+ return nil
+ }
+
+ cmd.val = make([]KeyFlags, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ flagsLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Flags = make([]string, flagsLen)
+
+ for j := 0; j < flagsLen; j++ {
+ if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ---------------------------------------------------------------------------------------------------
+
+type ClusterLink struct {
+ Direction string
+ Node string
+ CreateTime int64
+ Events string
+ SendBufferAllocated int64
+ SendBufferUsed int64
+}
+
+type ClusterLinksCmd struct {
+ baseCmd
+
+ val []ClusterLink
+}
+
+var _ Cmder = (*ClusterLinksCmd)(nil)
+
+func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+ return &ClusterLinksCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+ cmd.val = val
+}
+
+func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+ return cmd.val
+}
+
+func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterLinksCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterLink, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "direction":
+ cmd.val[i].Direction, err = rd.ReadString()
+ case "node":
+ cmd.val[i].Node, err = rd.ReadString()
+ case "create-time":
+ cmd.val[i].CreateTime, err = rd.ReadInt()
+ case "events":
+ cmd.val[i].Events, err = rd.ReadString()
+ case "send-buffer-allocated":
+ cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+ case "send-buffer-used":
+ cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ------------------------------------------------------------------------------------------------------------------
+
+type SlotRange struct {
+ Start int64
+ End int64
+}
+
+type Node struct {
+ ID string
+ Endpoint string
+ IP string
+ Hostname string
+ Port int64
+ TLSPort int64
+ Role string
+ ReplicationOffset int64
+ Health string
+}
+
+type ClusterShard struct {
+ Slots []SlotRange
+ Nodes []Node
+}
+
+type ClusterShardsCmd struct {
+ baseCmd
+
+ val []ClusterShard
+}
+
+var _ Cmder = (*ClusterShardsCmd)(nil)
+
+func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+ return &ClusterShardsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+ cmd.val = val
+}
+
+func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+ return cmd.val
+}
+
+func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterShardsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterShard, n)
+
+ for i := 0; i < n; i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "slots":
+ l, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ for k := 0; k < l; k += 2 {
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+ }
+ case "nodes":
+ nodesLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Nodes = make([]Node, nodesLen)
+ for k := 0; k < nodesLen; k++ {
+ nodeMapLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for l := 0; l < nodeMapLen; l++ {
+ nodeKey, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch nodeKey {
+ case "id":
+ cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+ case "endpoint":
+ cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+ case "ip":
+ cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+ case "hostname":
+ cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+ case "port":
+ cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+ case "tls-port":
+ cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+ case "role":
+ cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+ case "replication-offset":
+ cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+ case "health":
+ cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+// -----------------------------------------
+
+type RankScore struct {
+ Rank int64
+ Score float64
+}
+
+type RankWithScoreCmd struct {
+ baseCmd
+
+ val RankScore
+}
+
+var _ Cmder = (*RankWithScoreCmd)(nil)
+
+func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+ return &RankWithScoreCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+ cmd.val = val
+}
+
+func (cmd *RankWithScoreCmd) Val() RankScore {
+ return cmd.val
+}
+
+func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *RankWithScoreCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ rank, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = RankScore{Rank: rank, Score: score}
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------------------------
+
+// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0)
+type ClientFlags uint64
+
+const (
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+ ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+ ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+ ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling
+ a command. usually this will be marked only during call()
+ however, blocked clients might have this flag kept until they
+ will try to reprocess the command. */
+ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */
+ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+)
+
+// ClientInfo is redis-server ClientInfo, not go-redis *Client
+type ClientInfo struct {
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+ Addr string // address/port of the client
+ LAddr string // address/port of local address client connected to (bind address)
+ FD int64 // file descriptor corresponding to the socket
+ Name string // the name set by the client with CLIENT SETNAME
+ Age time.Duration // total duration of the connection in seconds
+ Idle time.Duration // idle time of the connection in seconds
+ Flags ClientFlags // client flags (see below)
+ DB int // current database ID
+ Sub int // number of channel subscriptions
+ PSub int // number of pattern matching subscriptions
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+ Multi int // number of commands in a MULTI/EXEC context
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+ MultiMem int // redis version 7.0, memory is used up by buffered multi commands
+ BufferSize int // rbs, usable size of buffer
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+ OutputBufferLength int // obl, output buffer length
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+ OutputMemory int // omem, output buffer memory usage
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+ Events string // file descriptor events (see below)
+ LastCmd string // cmd, last command played
+ User string // the authenticated username of the client
+ Redir int64 // client id of current client tracking redirection
+ Resp int // redis version 7.0, client RESP protocol version
+ LibName string // redis version 7.2, client library name
+ LibVer string // redis version 7.2, client library version
+}
+
+type ClientInfoCmd struct {
+ baseCmd
+
+ val *ClientInfo
+}
+
+var _ Cmder = (*ClientInfoCmd)(nil)
+
+func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+ return &ClientInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+ cmd.val = val
+}
+
+func (cmd *ClientInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClientInfoCmd) Val() *ClientInfo {
+ return cmd.val
+}
+
+func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ // sds o = catClientInfoString(sdsempty(), c);
+ // o = sdscatlen(o,"\n",1);
+ // addReplyVerbatim(c,o,sdslen(o),"txt");
+ // sdsfree(o);
+ cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+ return err
+}
+
+// fmt.Sscanf() cannot handle null values
+func parseClientInfo(txt string) (info *ClientInfo, err error) {
+ info = &ClientInfo{}
+ for _, s := range strings.Split(txt, " ") {
+ kv := strings.Split(s, "=")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+ }
+ key, val := kv[0], kv[1]
+
+ switch key {
+ case "id":
+ info.ID, err = strconv.ParseInt(val, 10, 64)
+ case "addr":
+ info.Addr = val
+ case "laddr":
+ info.LAddr = val
+ case "fd":
+ info.FD, err = strconv.ParseInt(val, 10, 64)
+ case "name":
+ info.Name = val
+ case "age":
+ var age int
+ if age, err = strconv.Atoi(val); err == nil {
+ info.Age = time.Duration(age) * time.Second
+ }
+ case "idle":
+ var idle int
+ if idle, err = strconv.Atoi(val); err == nil {
+ info.Idle = time.Duration(idle) * time.Second
+ }
+ case "flags":
+ if val == "N" {
+ break
+ }
+
+ for i := 0; i < len(val); i++ {
+ switch val[i] {
+ case 'S':
+ info.Flags |= ClientSlave
+ case 'O':
+ info.Flags |= ClientSlave | ClientMonitor
+ case 'M':
+ info.Flags |= ClientMaster
+ case 'P':
+ info.Flags |= ClientPubSub
+ case 'x':
+ info.Flags |= ClientMulti
+ case 'b':
+ info.Flags |= ClientBlocked
+ case 't':
+ info.Flags |= ClientTracking
+ case 'R':
+ info.Flags |= ClientTrackingBrokenRedir
+ case 'B':
+ info.Flags |= ClientTrackingBCAST
+ case 'd':
+ info.Flags |= ClientDirtyCAS
+ case 'c':
+ info.Flags |= ClientCloseAfterCommand
+ case 'u':
+ info.Flags |= ClientUnBlocked
+ case 'A':
+ info.Flags |= ClientCloseASAP
+ case 'U':
+ info.Flags |= ClientUnixSocket
+ case 'r':
+ info.Flags |= ClientReadOnly
+ case 'e':
+ info.Flags |= ClientNoEvict
+ case 'T':
+ info.Flags |= ClientNoTouch
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+ }
+ }
+ case "db":
+ info.DB, err = strconv.Atoi(val)
+ case "sub":
+ info.Sub, err = strconv.Atoi(val)
+ case "psub":
+ info.PSub, err = strconv.Atoi(val)
+ case "ssub":
+ info.SSub, err = strconv.Atoi(val)
+ case "multi":
+ info.Multi, err = strconv.Atoi(val)
+ case "qbuf":
+ info.QueryBuf, err = strconv.Atoi(val)
+ case "qbuf-free":
+ info.QueryBufFree, err = strconv.Atoi(val)
+ case "argv-mem":
+ info.ArgvMem, err = strconv.Atoi(val)
+ case "multi-mem":
+ info.MultiMem, err = strconv.Atoi(val)
+ case "rbs":
+ info.BufferSize, err = strconv.Atoi(val)
+ case "rbp":
+ info.BufferPeak, err = strconv.Atoi(val)
+ case "obl":
+ info.OutputBufferLength, err = strconv.Atoi(val)
+ case "oll":
+ info.OutputListLength, err = strconv.Atoi(val)
+ case "omem":
+ info.OutputMemory, err = strconv.Atoi(val)
+ case "tot-mem":
+ info.TotalMemory, err = strconv.Atoi(val)
+ case "events":
+ info.Events = val
+ case "cmd":
+ info.LastCmd = val
+ case "user":
+ info.User = val
+ case "redir":
+ info.Redir, err = strconv.ParseInt(val, 10, 64)
+ case "resp":
+ info.Resp, err = strconv.Atoi(val)
+ case "lib-name":
+ info.LibName = val
+ case "lib-ver":
+ info.LibVer = val
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return info, nil
+}
+
+// -------------------------------------------
+
+type ACLLogEntry struct {
+ Count int64
+ Reason string
+ Context string
+ Object string
+ Username string
+ AgeSeconds float64
+ ClientInfo *ClientInfo
+ EntryID int64
+ TimestampCreated int64
+ TimestampLastUpdated int64
+}
+
+type ACLLogCmd struct {
+ baseCmd
+
+ val []*ACLLogEntry
+}
+
+var _ Cmder = (*ACLLogCmd)(nil)
+
+func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+ return &ACLLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+ cmd.val = val
+}
+
+func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+ return cmd.val
+}
+
+func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ACLLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]*ACLLogEntry, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i] = &ACLLogEntry{}
+ entry := cmd.val[i]
+ respLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for j := 0; j < respLen; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "count":
+ entry.Count, err = rd.ReadInt()
+ case "reason":
+ entry.Reason, err = rd.ReadString()
+ case "context":
+ entry.Context, err = rd.ReadString()
+ case "object":
+ entry.Object, err = rd.ReadString()
+ case "username":
+ entry.Username, err = rd.ReadString()
+ case "age-seconds":
+ entry.AgeSeconds, err = rd.ReadFloat()
+ case "client-info":
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+ if err != nil {
+ return err
+ }
+ case "entry-id":
+ entry.EntryID, err = rd.ReadInt()
+ case "timestamp-created":
+ entry.TimestampCreated, err = rd.ReadInt()
+ case "timestamp-last-updated":
+ entry.TimestampLastUpdated, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// LibraryInfo holds the library info.
+type LibraryInfo struct {
+ LibName *string
+ LibVer *string
+}
+
+// WithLibraryName returns a valid LibraryInfo with library name only.
+func WithLibraryName(libName string) LibraryInfo {
+ return LibraryInfo{LibName: &libName}
+}
+
+// WithLibraryVersion returns a valid LibraryInfo with library version only.
+func WithLibraryVersion(libVer string) LibraryInfo {
+ return LibraryInfo{LibVer: &libVer}
+}
+
+// -------------------------------------------
+
+type InfoCmd struct {
+ baseCmd
+ val map[string]map[string]string
+}
+
+var _ Cmder = (*InfoCmd)(nil)
+
+func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd {
+ return &InfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *InfoCmd) SetVal(val map[string]map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *InfoCmd) Val() map[string]map[string]string {
+ return cmd.val
+}
+
+func (cmd *InfoCmd) Result() (map[string]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *InfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *InfoCmd) readReply(rd *proto.Reader) error {
+ val, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ section := ""
+ scanner := bufio.NewScanner(strings.NewReader(val))
+ moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") {
+ if cmd.val == nil {
+ cmd.val = make(map[string]map[string]string)
+ }
+ section = strings.TrimPrefix(line, "# ")
+ cmd.val[section] = make(map[string]string)
+ } else if line != "" {
+ if section == "Modules" {
+ kv := moduleRe.FindStringSubmatch(line)
+ if len(kv) == 3 {
+ cmd.val[section][kv[1]] = kv[2]
+ }
+ } else {
+ kv := strings.SplitN(line, ":", 2)
+ if len(kv) == 2 {
+ cmd.val[section][kv[0]] = kv[1]
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (cmd *InfoCmd) Item(section, key string) string {
+ if cmd.val == nil {
+ return ""
+ } else if cmd.val[section] == nil {
+ return ""
+ } else {
+ return cmd.val[section][key]
+ }
+}
+
+type MonitorStatus int
+
+const (
+ monitorStatusIdle MonitorStatus = iota
+ monitorStatusStart
+ monitorStatusStop
+)
+
+type MonitorCmd struct {
+ baseCmd
+ ch chan string
+ status MonitorStatus
+ mu sync.Mutex
+}
+
+func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd {
+ return &MonitorCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"monitor"},
+ },
+ ch: ch,
+ status: monitorStatusIdle,
+ mu: sync.Mutex{},
+ }
+}
+
+func (cmd *MonitorCmd) String() string {
+ return cmdString(cmd, nil)
+}
+
+func (cmd *MonitorCmd) readReply(rd *proto.Reader) error {
+ ctx, cancel := context.WithCancel(cmd.ctx)
+ go func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := cmd.readMonitor(rd, cancel)
+ if err != nil {
+ cmd.err = err
+ return
+ }
+ }
+ }
+ }(ctx)
+ return nil
+}
+
+func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error {
+ for {
+ cmd.mu.Lock()
+ st := cmd.status
+ cmd.mu.Unlock()
+ if pk, _ := rd.Peek(1); len(pk) != 0 && st == monitorStatusStart {
+ line, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.ch <- line
+ }
+ if st == monitorStatusStop {
+ cancel()
+ break
+ }
+ }
+ return nil
+}
+
+func (cmd *MonitorCmd) Start() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStart
+}
+
+func (cmd *MonitorCmd) Stop() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStop
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command_test.go
index 168f9f6..b9d558c 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/command_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/command_test.go
@@ -4,10 +4,10 @@ import (
"errors"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ "github.com/redis/go-redis/v9"
- redis "github.com/go-redis/redis/v8"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
)
var _ = Describe("Cmd", func() {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go
new file mode 100644
index 0000000..db59594
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands.go
@@ -0,0 +1,718 @@
+package redis
+
+import (
+ "context"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+ return append(dst, arg)
+ default:
+ // scan struct field
+ v := reflect.ValueOf(arg)
+ if v.Type().Kind() == reflect.Ptr {
+ if v.IsNil() {
+ // error: arg is not a valid object
+ return dst
+ }
+ v = v.Elem()
+ }
+
+ if v.Type().Kind() == reflect.Struct {
+ return appendStructField(dst, v)
+ }
+
+ return append(dst, arg)
+ }
+}
+
+// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst.
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ tag := typ.Field(i).Tag.Get("redis")
+ if tag == "" || tag == "-" {
+ continue
+ }
+ name, opt, _ := strings.Cut(tag, ",")
+ if name == "" {
+ continue
+ }
+
+ field := v.Field(i)
+
+ // miss field
+ if omitEmpty(opt) && isEmptyValue(field) {
+ continue
+ }
+
+ if field.CanInterface() {
+ dst = append(dst, name, field.Interface())
+ }
+ }
+
+ return dst
+}
+
+func omitEmpty(opt string) bool {
+ for opt != "" {
+ var name string
+ name, opt, _ = strings.Cut(opt, ",")
+ if name == "omitempty" {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Pointer:
+ return v.IsNil()
+ }
+ return false
+}
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+ CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+ CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientInfo(ctx context.Context) *ClientInfoCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientUnpause(ctx context.Context) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ClientUnblock(ctx context.Context, id int64) *IntCmd
+ ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
+
+ ACLCmdable
+ BitMapCmdable
+ ClusterCmdable
+ GearsCmdable
+ GenericCmdable
+ GeoCmdable
+ HashCmdable
+ HyperLogLogCmdable
+ ListCmdable
+ ProbabilisticCmdable
+ PubSubCmdable
+ ScriptingFunctionsCmdable
+ SetCmdable
+ SortedSetCmdable
+ StringCmdable
+ StreamCmdable
+ TimeseriesCmdable
+ JSONCmdable
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+ ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd
+ Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL Perform an AUTH command, using the given user and pass.
+// Should be used to authenticate the current connection with one of the connections defined in the ACL list
+// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetInfo sends a CLIENT SETINFO command with the provided info.
+func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd {
+ err := info.Validate()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ var cmd *StatusCmd
+ if info.LibName != nil {
+ libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version()))
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName)
+ } else {
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer)
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Validate checks if only one field in the struct is non-nil.
+func (info LibraryInfo) Validate() error {
+ if info.LibName != nil && info.LibVer != nil {
+ return errors.New("both LibName and LibVer cannot be set at the same time")
+ }
+ if info.LibName == nil && info.LibVer == nil {
+ return errors.New("at least one of LibName and LibVer should be set")
+ }
+ return nil
+}
+
+// Hello Set the resp protocol used.
+func (c statefulCmdable) Hello(ctx context.Context,
+ ver int, username, password, clientName string,
+) *MapStringInterfaceCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "hello", ver)
+ if password != "" {
+ if username != "" {
+ args = append(args, "auth", username, password)
+ } else {
+ args = append(args, "auth", "default", password)
+ }
+ }
+ if clientName != "" {
+ args = append(args, "setname", clientName)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FilterBy is used for the `CommandList` command parameter.
+type FilterBy struct {
+ Module string
+ ACLCat string
+ Pattern string
+}
+
+func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+ args := make([]interface{}, 0, 5)
+ args = append(args, "command", "list")
+ if filter != nil {
+ if filter.Module != "" {
+ args = append(args, "filterby", "module", filter.Module)
+ } else if filter.ACLCat != "" {
+ args = append(args, "filterby", "aclcat", filter.ACLCat)
+ } else if filter.Pattern != "" {
+ args = append(args, "filterby", "pattern", filter.Pattern)
+ }
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeys"
+ copy(args[2:], commands)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeysandflags"
+ copy(args[2:], commands)
+ cmd := NewKeyFlagsCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter is new style syntax, while the ClientKill is old
+//
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnpause(ctx context.Context) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "unpause")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientInfo(ctx context.Context) *ClientInfoCmd {
+ cmd := NewClientInfoCmd(ctx, "client", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------------------------
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, sections ...string) *StringCmd {
+ args := make([]interface{}, 1+len(sections))
+ args[0] = "info"
+ for i, section := range sections {
+ args[i+1] = section
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) InfoMap(ctx context.Context, sections ...string) *InfoCmd {
+ args := make([]interface{}, 1+len(sections))
+ args[0] = "info"
+ for i, section := range sections {
+ args[i+1] = section
+ }
+ cmd := NewInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(_ context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// ModuleLoadexConfig struct is used to specify the arguments for the MODULE LOADEX command of redis.
+// `MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]`
+type ModuleLoadexConfig struct {
+ Path string
+ Conf map[string]interface{}
+ Args []interface{}
+}
+
+func (c *ModuleLoadexConfig) toArgs() []interface{} {
+ args := make([]interface{}, 3, 3+len(c.Conf)*3+len(c.Args)*2)
+ args[0] = "MODULE"
+ args[1] = "LOADEX"
+ args[2] = c.Path
+ for k, v := range c.Conf {
+ args = append(args, "CONFIG", k, v)
+ }
+ for _, arg := range c.Args {
+ args = append(args, "ARGS", arg)
+ }
+ return args
+}
+
+// ModuleLoadex Redis `MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]` command.
+func (c cmdable) ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd {
+ cmd := NewStringCmd(ctx, conf.toArgs()...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+/*
+Monitor - represents a Redis MONITOR command, allowing the user to capture
+and process all commands sent to a Redis server. This mimics the behavior of
+MONITOR in the redis-cli.
+
+Notes:
+- Using MONITOR blocks the connection to the server for itself. It needs a dedicated connection
+- The user should create a channel of type string
+- This runs concurrently in the background. Trigger via the Start and Stop functions
+See further: Redis MONITOR command: https://redis.io/commands/monitor
+*/
+func (c cmdable) Monitor(ctx context.Context, ch chan string) *MonitorCmd {
+ cmd := newMonitorCmd(ctx, ch)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands_test.go
index 030bdf3..d30a9d8 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/commands_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/commands_test.go
@@ -5,15 +5,25 @@ import (
"encoding/json"
"fmt"
"reflect"
+ "strconv"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/v9/internal/proto"
)
+type TimeValue struct {
+ time.Time
+}
+
+func (t *TimeValue) ScanRedis(s string) (err error) {
+ t.Time, err = time.Parse(time.RFC3339Nano, s)
+ return
+}
+
var _ = Describe("Commands", func() {
ctx := context.TODO()
var client *redis.Client
@@ -47,6 +57,17 @@ var _ = Describe("Commands", func() {
Expect(stats.IdleConns).To(Equal(uint32(1)))
})
+ It("should hello", func() {
+ cmds, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Hello(ctx, 3, "", "", "")
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ m, err := cmds[0].(*redis.MapStringInterfaceCmd).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(m["proto"]).To(Equal(int64(3)))
+ })
+
It("should Echo", func() {
pipe := client.Pipeline()
echo := pipe.Echo(ctx, "hello")
@@ -74,7 +95,19 @@ var _ = Describe("Commands", func() {
Expect(time.Now()).To(BeTemporally("~", start.Add(wait), 3*time.Second))
})
- It("should Select", func() {
+ It("should WaitAOF", func() {
+ const waitAOF = 3 * time.Second
+ Skip("flaky test")
+
+ // assuming that the redis instance doesn't have AOF enabled
+ start := time.Now()
+ val, err := client.WaitAOF(ctx, 1, 1, waitAOF).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(ContainSubstring("ERR WAITAOF cannot be used when numlocal is set but appendonly is disabled"))
+ Expect(time.Now()).To(BeTemporally("~", start.Add(waitAOF), 3*time.Second))
+ })
+
+ It("should Select", Label("NonRedisEnterprise"), func() {
pipe := client.Pipeline()
sel := pipe.Select(ctx, 1)
_, err := pipe.Exec(ctx)
@@ -84,7 +117,7 @@ var _ = Describe("Commands", func() {
Expect(sel.Val()).To(Equal("OK"))
})
- It("should SwapDB", func() {
+ It("should SwapDB", Label("NonRedisEnterprise"), func() {
pipe := client.Pipeline()
sel := pipe.SwapDB(ctx, 1, 2)
_, err := pipe.Exec(ctx)
@@ -111,6 +144,43 @@ var _ = Describe("Commands", func() {
}, "30s").Should(Equal("Background saving started"))
})
+ It("Should CommandGetKeys", func() {
+ keys, err := client.CommandGetKeys(ctx, "MSET", "a", "b", "c", "d", "e", "f").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(Equal([]string{"a", "c", "e"}))
+
+ keys, err = client.CommandGetKeys(ctx, "EVAL", "not consulted", "3", "key1", "key2", "key3", "arg1", "arg2", "arg3", "argN").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(Equal([]string{"key1", "key2", "key3"}))
+
+ keys, err = client.CommandGetKeys(ctx, "SORT", "mylist", "ALPHA", "STORE", "outlist").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(Equal([]string{"mylist", "outlist"}))
+
+ _, err = client.CommandGetKeys(ctx, "FAKECOMMAND", "arg1", "arg2").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("ERR Invalid command specified"))
+ })
+
+ It("should CommandGetKeysAndFlags", func() {
+ keysAndFlags, err := client.CommandGetKeysAndFlags(ctx, "LMOVE", "mylist1", "mylist2", "left", "left").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keysAndFlags).To(Equal([]redis.KeyFlags{
+ {
+ Key: "mylist1",
+ Flags: []string{"RW", "access", "delete"},
+ },
+ {
+ Key: "mylist2",
+ Flags: []string{"RW", "insert"},
+ },
+ }))
+
+ _, err = client.CommandGetKeysAndFlags(ctx, "FAKECOMMAND", "arg1", "arg2").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("ERR Invalid command specified"))
+ })
+
It("should ClientKill", func() {
r := client.ClientKill(ctx, "1.1.1.1:1111")
Expect(r.Err()).To(MatchError("ERR No such client"))
@@ -143,7 +213,13 @@ var _ = Describe("Commands", func() {
Expect(r).To(Equal(int64(0)))
})
- It("should ClientPause", func() {
+ It("should ClientInfo", func() {
+ info, err := client.ClientInfo(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).NotTo(BeNil())
+ })
+
+ It("should ClientPause", Label("NonRedisEnterprise"), func() {
err := client.ClientPause(ctx, time.Second).Err()
Expect(err).NotTo(HaveOccurred())
@@ -167,30 +243,92 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("theclientname"))
})
+ It("should ClientSetInfo", func() {
+ pipe := client.Pipeline()
+
+ // Test setting the libName
+ libName := "go-redis"
+ libInfo := redis.WithLibraryName(libName)
+ setInfo := pipe.ClientSetInfo(ctx, libInfo)
+ _, err := pipe.Exec(ctx)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(setInfo.Err()).NotTo(HaveOccurred())
+ Expect(setInfo.Val()).To(Equal("OK"))
+
+ // Test setting the libVer
+ libVer := "vX.x"
+ libInfo = redis.WithLibraryVersion(libVer)
+ setInfo = pipe.ClientSetInfo(ctx, libInfo)
+ _, err = pipe.Exec(ctx)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(setInfo.Err()).NotTo(HaveOccurred())
+ Expect(setInfo.Val()).To(Equal("OK"))
+
+ // Test setting both fields, expect a panic
+ libInfo = redis.LibraryInfo{LibName: &libName, LibVer: &libVer}
+
+ Expect(func() {
+ defer func() {
+ if r := recover(); r != nil {
+ err := r.(error)
+ Expect(err).To(MatchError("both LibName and LibVer cannot be set at the same time"))
+ }
+ }()
+ pipe.ClientSetInfo(ctx, libInfo)
+ }).To(Panic())
+
+ // Test setting neither field, expect a panic
+ libInfo = redis.LibraryInfo{}
+
+ Expect(func() {
+ defer func() {
+ if r := recover(); r != nil {
+ err := r.(error)
+ Expect(err).To(MatchError("at least one of LibName and LibVer should be set"))
+ }
+ }()
+ pipe.ClientSetInfo(ctx, libInfo)
+ }).To(Panic())
+ // Test setting the default options for libName, libName suffix and libVer
+ clientInfo := client.ClientInfo(ctx).Val()
+ Expect(clientInfo.LibName).To(ContainSubstring("go-redis(go-redis,"))
+ // Test setting the libName suffix in options
+ opt := redisOptions()
+ opt.IdentitySuffix = "suffix"
+ client2 := redis.NewClient(opt)
+ defer client2.Close()
+ clientInfo = client2.ClientInfo(ctx).Val()
+ Expect(clientInfo.LibName).To(ContainSubstring("go-redis(suffix,"))
+
+ })
+
It("should ConfigGet", func() {
val, err := client.ConfigGet(ctx, "*").Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).NotTo(BeEmpty())
})
- It("should ConfigResetStat", func() {
+ It("should ConfigResetStat", Label("NonRedisEnterprise"), func() {
r := client.ConfigResetStat(ctx)
Expect(r.Err()).NotTo(HaveOccurred())
Expect(r.Val()).To(Equal("OK"))
})
- It("should ConfigSet", func() {
+ It("should ConfigSet", Label("NonRedisEnterprise"), func() {
configGet := client.ConfigGet(ctx, "maxmemory")
Expect(configGet.Err()).NotTo(HaveOccurred())
- Expect(configGet.Val()).To(HaveLen(2))
- Expect(configGet.Val()[0]).To(Equal("maxmemory"))
+ Expect(configGet.Val()).To(HaveLen(1))
+ _, ok := configGet.Val()["maxmemory"]
+ Expect(ok).To(BeTrue())
- configSet := client.ConfigSet(ctx, "maxmemory", configGet.Val()[1].(string))
+ configSet := client.ConfigSet(ctx, "maxmemory", configGet.Val()["maxmemory"])
Expect(configSet.Err()).NotTo(HaveOccurred())
Expect(configSet.Val()).To(Equal("OK"))
})
- It("should ConfigRewrite", func() {
+ It("should ConfigRewrite", Label("NonRedisEnterprise"), func() {
configRewrite := client.ConfigRewrite(ctx)
Expect(configRewrite.Err()).NotTo(HaveOccurred())
Expect(configRewrite.Val()).To(Equal("OK"))
@@ -208,6 +346,20 @@ var _ = Describe("Commands", func() {
Expect(info.Val()).NotTo(Equal(""))
})
+ It("should InfoMap", Label("redis.info"), func() {
+ info := client.InfoMap(ctx)
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(BeNil())
+
+ info = client.InfoMap(ctx, "dummy")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).To(BeNil())
+
+ info = client.InfoMap(ctx, "server")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).To(HaveLen(1))
+ })
+
It("should Info cpu", func() {
info := client.Info(ctx, "cpu")
Expect(info.Err()).NotTo(HaveOccurred())
@@ -215,20 +367,28 @@ var _ = Describe("Commands", func() {
Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
})
- It("should LastSave", func() {
+ It("should Info cpu and memory", func() {
+ info := client.Info(ctx, "cpu", "memory")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(Equal(""))
+ Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
+ Expect(info.Val()).To(ContainSubstring(`memory`))
+ })
+
+ It("should LastSave", Label("NonRedisEnterprise"), func() {
lastSave := client.LastSave(ctx)
Expect(lastSave.Err()).NotTo(HaveOccurred())
Expect(lastSave.Val()).NotTo(Equal(0))
})
- It("should Save", func() {
+ It("should Save", Label("NonRedisEnterprise"), func() {
// workaround for "ERR Background save already in progress"
Eventually(func() string {
return client.Save(ctx).Val()
}, "10s").Should(Equal("OK"))
})
- It("should SlaveOf", func() {
+ It("should SlaveOf", Label("NonRedisEnterprise"), func() {
slaveOf := client.SlaveOf(ctx, "localhost", "8888")
Expect(slaveOf.Err()).NotTo(HaveOccurred())
Expect(slaveOf.Val()).To(Equal("OK"))
@@ -244,10 +404,10 @@ var _ = Describe("Commands", func() {
Expect(tm).To(BeTemporally("~", time.Now(), 3*time.Second))
})
- It("should Command", func() {
+ It("should Command", Label("NonRedisEnterprise"), func() {
cmds, err := client.Command(ctx).Result()
Expect(err).NotTo(HaveOccurred())
- Expect(len(cmds)).To(BeNumerically("~", 200, 25))
+ Expect(len(cmds)).To(BeNumerically("~", 240, 25))
cmd := cmds["mget"]
Expect(cmd.Name).To(Equal("mget"))
@@ -260,16 +420,65 @@ var _ = Describe("Commands", func() {
cmd = cmds["ping"]
Expect(cmd.Name).To(Equal("ping"))
Expect(cmd.Arity).To(Equal(int8(-1)))
- Expect(cmd.Flags).To(ContainElement("stale"))
Expect(cmd.Flags).To(ContainElement("fast"))
Expect(cmd.FirstKeyPos).To(Equal(int8(0)))
Expect(cmd.LastKeyPos).To(Equal(int8(0)))
Expect(cmd.StepCount).To(Equal(int8(0)))
})
+
+ It("should return all command names", func() {
+ cmdList := client.CommandList(ctx, nil)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ cmdNames := cmdList.Val()
+
+ Expect(cmdNames).NotTo(BeEmpty())
+
+ // Assert that some expected commands are present in the list
+ Expect(cmdNames).To(ContainElement("get"))
+ Expect(cmdNames).To(ContainElement("set"))
+ Expect(cmdNames).To(ContainElement("hset"))
+ })
+
+ It("should filter commands by module", func() {
+ filter := &redis.FilterBy{
+ Module: "JSON",
+ }
+ cmdList := client.CommandList(ctx, filter)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ Expect(cmdList.Val()).To(HaveLen(0))
+ })
+
+ It("should filter commands by ACL category", func() {
+ filter := &redis.FilterBy{
+ ACLCat: "admin",
+ }
+
+ cmdList := client.CommandList(ctx, filter)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ cmdNames := cmdList.Val()
+
+ // Assert that the returned list only contains commands from the admin ACL category
+ Expect(len(cmdNames)).To(BeNumerically(">", 10))
+ })
+
+ It("should filter commands by pattern", func() {
+ filter := &redis.FilterBy{
+ Pattern: "*GET*",
+ }
+ cmdList := client.CommandList(ctx, filter)
+ Expect(cmdList.Err()).NotTo(HaveOccurred())
+ cmdNames := cmdList.Val()
+
+ // Assert that the returned list only contains commands that match the given pattern
+ Expect(cmdNames).To(ContainElement("get"))
+ Expect(cmdNames).To(ContainElement("getbit"))
+ Expect(cmdNames).To(ContainElement("getrange"))
+ Expect(cmdNames).NotTo(ContainElement("set"))
+ })
})
Describe("debugging", func() {
- It("should DebugObject", func() {
+ PIt("should DebugObject", func() {
err := client.DebugObject(ctx, "foo").Err()
Expect(err).To(MatchError("ERR no such key"))
@@ -380,17 +589,28 @@ var _ = Describe("Commands", func() {
})
It("should ExpireAt", func() {
- set := client.Set(ctx, "key", "Hello", 0)
- Expect(set.Err()).NotTo(HaveOccurred())
- Expect(set.Val()).To(Equal("OK"))
+ setCmd := client.Set(ctx, "key", "Hello", 0)
+ Expect(setCmd.Err()).NotTo(HaveOccurred())
+ Expect(setCmd.Val()).To(Equal("OK"))
n, err := client.Exists(ctx, "key").Result()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(1)))
- expireAt := client.ExpireAt(ctx, "key", time.Now().Add(-time.Hour))
- Expect(expireAt.Err()).NotTo(HaveOccurred())
- Expect(expireAt.Val()).To(Equal(true))
+ // Check correct expiration time is set in the future
+ expireAt := time.Now().Add(time.Minute)
+ expireAtCmd := client.ExpireAt(ctx, "key", expireAt)
+ Expect(expireAtCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireAtCmd.Val()).To(Equal(true))
+
+ timeCmd := client.ExpireTime(ctx, "key")
+ Expect(timeCmd.Err()).NotTo(HaveOccurred())
+ Expect(timeCmd.Val().Seconds()).To(BeNumerically("==", expireAt.Unix()))
+
+ // Check correct expiration in the past
+ expireAtCmd = client.ExpireAt(ctx, "key", time.Now().Add(-time.Hour))
+ Expect(expireAtCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireAtCmd.Val()).To(Equal(true))
n, err = client.Exists(ctx, "key").Result()
Expect(err).NotTo(HaveOccurred())
@@ -415,7 +635,7 @@ var _ = Describe("Commands", func() {
Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "three", "two"}))
})
- It("should Migrate", func() {
+ It("should Migrate", Label("NonRedisEnterprise"), func() {
migrate := client.Migrate(ctx, "localhost", redisSecondaryPort, "key", 0, 0)
Expect(migrate.Err()).NotTo(HaveOccurred())
Expect(migrate.Val()).To(Equal("NOKEY"))
@@ -429,7 +649,7 @@ var _ = Describe("Commands", func() {
Expect(migrate.Val()).To(Equal(""))
})
- It("should Move", func() {
+ It("should Move", Label("NonRedisEnterprise"), func() {
move := client.Move(ctx, "key", 2)
Expect(move.Err()).NotTo(HaveOccurred())
Expect(move.Val()).To(Equal(false))
@@ -456,7 +676,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("hello"))
})
- It("should Object", func() {
+ It("should Object", Label("NonRedisEnterprise"), func() {
start := time.Now()
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
@@ -466,6 +686,11 @@ var _ = Describe("Commands", func() {
Expect(refCount.Err()).NotTo(HaveOccurred())
Expect(refCount.Val()).To(Equal(int64(1)))
+ client.ConfigSet(ctx, "maxmemory-policy", "volatile-lfu")
+ freq := client.ObjectFreq(ctx, "key")
+ Expect(freq.Err()).NotTo(HaveOccurred())
+ client.ConfigSet(ctx, "maxmemory-policy", "noeviction") // default
+
err := client.ObjectEncoding(ctx, "key").Err()
Expect(err).NotTo(HaveOccurred())
@@ -477,7 +702,7 @@ var _ = Describe("Commands", func() {
// if too much time (>1s) is used during command execution, it may also cause the test to fail.
// so the ObjectIdleTime result should be <=now-start+1s
// link: https://github.com/redis/redis/blob/5b48d900498c85bbf4772c1d466c214439888115/src/object.c#L1265-L1272
- Expect(idleTime.Val()).To(BeNumerically("<=", time.Now().Sub(start)+time.Second))
+ Expect(idleTime.Val()).To(BeNumerically("<=", time.Since(start)+time.Second))
})
It("should Persist", func() {
@@ -540,6 +765,27 @@ var _ = Describe("Commands", func() {
Expect(pttl.Val()).To(BeNumerically("~", expiration, 100*time.Millisecond))
})
+ It("should PExpireTime", func() {
+ // The command returns -1 if the key exists but has no associated expiration time.
+ // The command returns -2 if the key does not exist.
+ pExpireTime := client.PExpireTime(ctx, "key")
+ Expect(pExpireTime.Err()).NotTo(HaveOccurred())
+ Expect(pExpireTime.Val() < 0).To(Equal(true))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ timestamp := time.Now().Add(time.Minute)
+ expireAt := client.PExpireAt(ctx, "key", timestamp)
+ Expect(expireAt.Err()).NotTo(HaveOccurred())
+ Expect(expireAt.Val()).To(Equal(true))
+
+ pExpireTime = client.PExpireTime(ctx, "key")
+ Expect(pExpireTime.Err()).NotTo(HaveOccurred())
+ Expect(pExpireTime.Val().Milliseconds()).To(BeNumerically("==", timestamp.UnixMilli()))
+ })
+
It("should PTTL", func() {
set := client.Set(ctx, "key", "Hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
@@ -569,7 +815,7 @@ var _ = Describe("Commands", func() {
Expect(randomKey.Val()).To(Equal("key"))
})
- It("should Rename", func() {
+ It("should Rename", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -583,7 +829,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("hello"))
})
- It("should RenameNX", func() {
+ It("should RenameNX", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -640,6 +886,28 @@ var _ = Describe("Commands", func() {
Expect(val).To(Equal("hello"))
})
+ It("should Sort RO", func() {
+ size, err := client.LPush(ctx, "list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush(ctx, "list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush(ctx, "list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ els, err := client.SortRO(ctx, "list", &redis.Sort{
+ Offset: 0,
+ Count: 2,
+ Order: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"1", "2"}))
+ })
+
It("should Sort", func() {
size, err := client.LPush(ctx, "list", "1").Result()
Expect(err).NotTo(HaveOccurred())
@@ -662,7 +930,7 @@ var _ = Describe("Commands", func() {
Expect(els).To(Equal([]string{"1", "2"}))
})
- It("should Sort and Get", func() {
+ It("should Sort and Get", Label("NonRedisEnterprise"), func() {
size, err := client.LPush(ctx, "list", "1").Result()
Expect(err).NotTo(HaveOccurred())
Expect(size).To(Equal(int64(1)))
@@ -695,7 +963,7 @@ var _ = Describe("Commands", func() {
}
})
- It("should Sort and Store", func() {
+ It("should Sort and Store", Label("NonRedisEnterprise"), func() {
size, err := client.LPush(ctx, "list", "1").Result()
Expect(err).NotTo(HaveOccurred())
Expect(size).To(Equal(int64(1)))
@@ -735,7 +1003,30 @@ var _ = Describe("Commands", func() {
Expect(touch.Val()).To(Equal(int64(2)))
})
+ It("should ExpireTime", func() {
+ // The command returns -1 if the key exists but has no associated expiration time.
+ // The command returns -2 if the key does not exist.
+ expireTimeCmd := client.ExpireTime(ctx, "key")
+ Expect(expireTimeCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireTimeCmd.Val() < 0).To(Equal(true))
+
+ set := client.Set(ctx, "key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expireAt := time.Now().Add(time.Minute)
+ expireAtCmd := client.ExpireAt(ctx, "key", expireAt)
+ Expect(expireAtCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireAtCmd.Val()).To(Equal(true))
+
+ expireTimeCmd = client.ExpireTime(ctx, "key")
+ Expect(expireTimeCmd.Err()).NotTo(HaveOccurred())
+ Expect(expireTimeCmd.Val().Seconds()).To(BeNumerically("==", expireAt.Unix()))
+ })
+
It("should TTL", func() {
+ // The command returns -1 if the key exists but has no associated expire
+ // The command returns -2 if the key does not exist.
ttl := client.TTL(ctx, "key")
Expect(ttl.Err()).NotTo(HaveOccurred())
Expect(ttl.Val() < 0).To(Equal(true))
@@ -815,7 +1106,7 @@ var _ = Describe("Commands", func() {
It("should ZScan", func() {
for i := 0; i < 1000; i++ {
- err := client.ZAdd(ctx, "myset", &redis.Z{
+ err := client.ZAdd(ctx, "myset", redis.Z{
Score: float64(i),
Member: fmt.Sprintf("member%d", i),
}).Err()
@@ -835,13 +1126,13 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(0)))
- append := client.Append(ctx, "key", "Hello")
- Expect(append.Err()).NotTo(HaveOccurred())
- Expect(append.Val()).To(Equal(int64(5)))
+ appendRes := client.Append(ctx, "key", "Hello")
+ Expect(appendRes.Err()).NotTo(HaveOccurred())
+ Expect(appendRes.Val()).To(Equal(int64(5)))
- append = client.Append(ctx, "key", " World")
- Expect(append.Err()).NotTo(HaveOccurred())
- Expect(append.Val()).To(Equal(int64(11)))
+ appendRes = client.Append(ctx, "key", " World")
+ Expect(appendRes.Err()).NotTo(HaveOccurred())
+ Expect(appendRes.Val()).To(Equal(int64(11)))
get := client.Get(ctx, "key")
Expect(get.Err()).NotTo(HaveOccurred())
@@ -872,7 +1163,7 @@ var _ = Describe("Commands", func() {
Expect(bitCount.Val()).To(Equal(int64(6)))
})
- It("should BitOpAnd", func() {
+ It("should BitOpAnd", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "1", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -890,7 +1181,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("0"))
})
- It("should BitOpOr", func() {
+ It("should BitOpOr", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "1", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -908,7 +1199,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("1"))
})
- It("should BitOpXor", func() {
+ It("should BitOpXor", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "\xff", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -926,7 +1217,7 @@ var _ = Describe("Commands", func() {
Expect(get.Val()).To(Equal("\xf0"))
})
- It("should BitOpNot", func() {
+ It("should BitOpNot", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key1", "\x00", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -981,10 +1272,41 @@ var _ = Describe("Commands", func() {
Expect(pos).To(Equal(int64(-1)))
})
+ It("should BitPosSpan", func() {
+ err := client.Set(ctx, "mykey", "\x00\xff\x00", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ pos, err := client.BitPosSpan(ctx, "mykey", 0, 1, 3, "byte").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(16)))
+
+ pos, err = client.BitPosSpan(ctx, "mykey", 0, 1, 3, "bit").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(1)))
+ })
+
It("should BitField", func() {
nn, err := client.BitField(ctx, "mykey", "INCRBY", "i5", 100, 1, "GET", "u4", 0).Result()
Expect(err).NotTo(HaveOccurred())
Expect(nn).To(Equal([]int64{1, 0}))
+
+ nn, err = client.BitField(ctx, "mykey", "set", "i1", 1, 1, "GET", "u4", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0, 4}))
+ })
+
+ It("should BitFieldRO", func() {
+ nn, err := client.BitField(ctx, "mykey", "SET", "u8", 8, 255).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0}))
+
+ nn, err = client.BitFieldRO(ctx, "mykey", "u8", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0}))
+
+ nn, err = client.BitFieldRO(ctx, "mykey", "u8", 0, "u4", 8, "u4", 12, "u4", 13).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nn).To(Equal([]int64{0, 15, 15, 14}))
})
It("should Decr", func() {
@@ -1170,25 +1492,61 @@ var _ = Describe("Commands", func() {
mGet := client.MGet(ctx, "key1", "key2", "_")
Expect(mGet.Err()).NotTo(HaveOccurred())
Expect(mGet.Val()).To(Equal([]interface{}{"hello1", "hello2", nil}))
+
+ // MSet struct
+ type set struct {
+ Set1 string `redis:"set1"`
+ Set2 int16 `redis:"set2"`
+ Set3 time.Duration `redis:"set3"`
+ Set4 interface{} `redis:"set4"`
+ Set5 map[string]interface{} `redis:"-"`
+ }
+ mSet = client.MSet(ctx, &set{
+ Set1: "val1",
+ Set2: 1024,
+ Set3: 2 * time.Millisecond,
+ Set4: nil,
+ Set5: map[string]interface{}{"k1": 1},
+ })
+ Expect(mSet.Err()).NotTo(HaveOccurred())
+ Expect(mSet.Val()).To(Equal("OK"))
+
+ mGet = client.MGet(ctx, "set1", "set2", "set3", "set4")
+ Expect(mGet.Err()).NotTo(HaveOccurred())
+ Expect(mGet.Val()).To(Equal([]interface{}{
+ "val1",
+ "1024",
+ strconv.Itoa(int(2 * time.Millisecond.Nanoseconds())),
+ "",
+ }))
})
It("should scan Mget", func() {
- err := client.MSet(ctx, "key1", "hello1", "key2", 123).Err()
+ now := time.Now()
+
+ err := client.MSet(ctx, "key1", "hello1", "key2", 123, "time", now.Format(time.RFC3339Nano)).Err()
Expect(err).NotTo(HaveOccurred())
- res := client.MGet(ctx, "key1", "key2", "_")
+ res := client.MGet(ctx, "key1", "key2", "_", "time")
Expect(res.Err()).NotTo(HaveOccurred())
type data struct {
- Key1 string `redis:"key1"`
- Key2 int `redis:"key2"`
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ Time TimeValue `redis:"time"`
}
var d data
Expect(res.Scan(&d)).NotTo(HaveOccurred())
- Expect(d).To(Equal(data{Key1: "hello1", Key2: 123}))
+ Expect(d.Time.UnixNano()).To(Equal(now.UnixNano()))
+ d.Time.Time = time.Time{}
+ Expect(d).To(Equal(data{
+ Key1: "hello1",
+ Key2: 123,
+ Time: TimeValue{Time: time.Time{}},
+ }))
})
- It("should MSetNX", func() {
+ It("should MSetNX", Label("NonRedisEnterprise"), func() {
mSetNX := client.MSetNX(ctx, "key1", "hello1", "key2", "hello2")
Expect(mSetNX.Err()).NotTo(HaveOccurred())
Expect(mSetNX.Val()).To(Equal(true))
@@ -1196,6 +1554,25 @@ var _ = Describe("Commands", func() {
mSetNX = client.MSetNX(ctx, "key2", "hello1", "key3", "hello2")
Expect(mSetNX.Err()).NotTo(HaveOccurred())
Expect(mSetNX.Val()).To(Equal(false))
+
+ // set struct
+ // MSet struct
+ type set struct {
+ Set1 string `redis:"set1"`
+ Set2 int16 `redis:"set2"`
+ Set3 time.Duration `redis:"set3"`
+ Set4 interface{} `redis:"set4"`
+ Set5 map[string]interface{} `redis:"-"`
+ }
+ mSetNX = client.MSetNX(ctx, &set{
+ Set1: "val1",
+ Set2: 1024,
+ Set3: 2 * time.Millisecond,
+ Set4: nil,
+ Set5: map[string]interface{}{"k1": 1},
+ })
+ Expect(mSetNX.Err()).NotTo(HaveOccurred())
+ Expect(mSetNX.Val()).To(Equal(true))
})
It("should SetWithArgs with TTL", func() {
@@ -1297,7 +1674,7 @@ var _ = Describe("Commands", func() {
Get: true,
}
val, err := client.SetArgs(ctx, "key", "hello", args).Result()
- Expect(err).To(Equal(proto.RedisError("ERR syntax error")))
+ Expect(err).To(Equal(redis.Nil))
Expect(val).To(Equal(""))
})
@@ -1335,7 +1712,7 @@ var _ = Describe("Commands", func() {
Get: true,
}
val, err := client.SetArgs(ctx, "key", "hello", args).Result()
- Expect(err).To(Equal(proto.RedisError("ERR syntax error")))
+ Expect(err).To(Equal(redis.Nil))
Expect(val).To(Equal(""))
})
@@ -1496,7 +1873,7 @@ var _ = Describe("Commands", func() {
})
It("should SetEX", func() {
- err := client.SetEX(ctx, "key", "hello", 1*time.Second).Err()
+ err := client.SetEx(ctx, "key", "hello", 1*time.Second).Err()
Expect(err).NotTo(HaveOccurred())
val, err := client.Get(ctx, "key").Result()
@@ -1634,7 +2011,7 @@ var _ = Describe("Commands", func() {
Expect(strLen.Val()).To(Equal(int64(0)))
})
- It("should Copy", func() {
+ It("should Copy", Label("NonRedisEnterprise"), func() {
set := client.Set(ctx, "key", "hello", 0)
Expect(set.Err()).NotTo(HaveOccurred())
Expect(set.Val()).To(Equal("OK"))
@@ -1659,6 +2036,105 @@ var _ = Describe("Commands", func() {
replace := client.Copy(ctx, "newKey", "key", redisOptions().DB, true)
Expect(replace.Val()).To(Equal(int64(1)))
})
+
+ It("should acl dryrun", func() {
+ dryRun := client.ACLDryRun(ctx, "default", "get", "randomKey")
+ Expect(dryRun.Err()).NotTo(HaveOccurred())
+ Expect(dryRun.Val()).To(Equal("OK"))
+ })
+
+ It("should fail module loadex", Label("NonRedisEnterprise"), func() {
+ dryRun := client.ModuleLoadex(ctx, &redis.ModuleLoadexConfig{
+ Path: "/path/to/non-existent-library.so",
+ Conf: map[string]interface{}{
+ "param1": "value1",
+ },
+ Args: []interface{}{
+ "arg1",
+ },
+ })
+ Expect(dryRun.Err()).To(HaveOccurred())
+ Expect(dryRun.Err().Error()).To(Equal("ERR Error loading the extension. Please check the server logs."))
+ })
+
+ It("converts the module loadex configuration to a slice of arguments correctly", func() {
+ conf := &redis.ModuleLoadexConfig{
+ Path: "/path/to/your/module.so",
+ Conf: map[string]interface{}{
+ "param1": "value1",
+ },
+ Args: []interface{}{
+ "arg1",
+ "arg2",
+ 3,
+ },
+ }
+
+ args := conf.ToArgs()
+
+ // Test if the arguments are in the correct order
+ expectedArgs := []interface{}{
+ "MODULE",
+ "LOADEX",
+ "/path/to/your/module.so",
+ "CONFIG",
+ "param1",
+ "value1",
+ "ARGS",
+ "arg1",
+ "ARGS",
+ "arg2",
+ "ARGS",
+ 3,
+ }
+
+ Expect(args).To(Equal(expectedArgs))
+ })
+
+ It("should ACL LOG", Label("NonRedisEnterprise"), func() {
+ err := client.Do(ctx, "acl", "setuser", "test", ">test", "on", "allkeys", "+get").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ clientAcl := redis.NewClient(redisOptions())
+ clientAcl.Options().Username = "test"
+ clientAcl.Options().Password = "test"
+ clientAcl.Options().DB = 0
+ _ = clientAcl.Set(ctx, "mystring", "foo", 0).Err()
+ _ = clientAcl.HSet(ctx, "myhash", "foo", "bar").Err()
+ _ = clientAcl.SAdd(ctx, "myset", "foo", "bar").Err()
+
+ logEntries, err := client.ACLLog(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(logEntries)).To(Equal(4))
+
+ for _, entry := range logEntries {
+ Expect(entry.Reason).To(Equal("command"))
+ Expect(entry.Context).To(Equal("toplevel"))
+ Expect(entry.Object).NotTo(BeEmpty())
+ Expect(entry.Username).To(Equal("test"))
+ Expect(entry.AgeSeconds).To(BeNumerically(">=", 0))
+ Expect(entry.ClientInfo).NotTo(BeNil())
+ Expect(entry.EntryID).To(BeNumerically(">=", 0))
+ Expect(entry.TimestampCreated).To(BeNumerically(">=", 0))
+ Expect(entry.TimestampLastUpdated).To(BeNumerically(">=", 0))
+ }
+
+ limitedLogEntries, err := client.ACLLog(ctx, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(limitedLogEntries)).To(Equal(2))
+ })
+
+ It("should ACL LOG RESET", Label("NonRedisEnterprise"), func() {
+ // Call ACL LOG RESET
+ resetCmd := client.ACLLogReset(ctx)
+ Expect(resetCmd.Err()).NotTo(HaveOccurred())
+ Expect(resetCmd.Val()).To(Equal("OK"))
+
+ // Verify that the log is empty after the reset
+ logEntries, err := client.ACLLog(ctx, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(logEntries)).To(Equal(0))
+ })
})
Describe("hashes", func() {
@@ -1713,19 +2189,47 @@ var _ = Describe("Commands", func() {
})
It("should scan", func() {
- err := client.HMSet(ctx, "hash", "key1", "hello1", "key2", 123).Err()
+ now := time.Now()
+
+ err := client.HMSet(ctx, "hash", "key1", "hello1", "key2", 123, "time", now.Format(time.RFC3339Nano)).Err()
Expect(err).NotTo(HaveOccurred())
res := client.HGetAll(ctx, "hash")
Expect(res.Err()).NotTo(HaveOccurred())
type data struct {
- Key1 string `redis:"key1"`
- Key2 int `redis:"key2"`
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ Time TimeValue `redis:"time"`
}
var d data
Expect(res.Scan(&d)).NotTo(HaveOccurred())
- Expect(d).To(Equal(data{Key1: "hello1", Key2: 123}))
+ Expect(d.Time.UnixNano()).To(Equal(now.UnixNano()))
+ d.Time.Time = time.Time{}
+ Expect(d).To(Equal(data{
+ Key1: "hello1",
+ Key2: 123,
+ Time: TimeValue{Time: time.Time{}},
+ }))
+
+ type data2 struct {
+ Key1 string `redis:"key1"`
+ Key2 int `redis:"key2"`
+ Time time.Time `redis:"time"`
+ }
+ err = client.HSet(ctx, "hash", &data2{
+ Key1: "hello2",
+ Key2: 200,
+ Time: now,
+ }).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var d2 data2
+ err = client.HMGet(ctx, "hash", "key1", "key2", "time").Scan(&d2)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(d2.Key1).To(Equal("hello2"))
+ Expect(d2.Key2).To(Equal(200))
+ Expect(d2.Time.Unix()).To(Equal(now.Unix()))
})
It("should HIncrBy", func() {
@@ -1827,6 +2331,52 @@ var _ = Describe("Commands", func() {
hGet := client.HGet(ctx, "hash", "key")
Expect(hGet.Err()).NotTo(HaveOccurred())
Expect(hGet.Val()).To(Equal("hello"))
+
+ // set struct
+ // MSet struct
+ type set struct {
+ Set1 string `redis:"set1"`
+ Set2 int16 `redis:"set2"`
+ Set3 time.Duration `redis:"set3"`
+ Set4 interface{} `redis:"set4"`
+ Set5 map[string]interface{} `redis:"-"`
+ Set6 string `redis:"set6,omitempty"`
+ }
+
+ hSet = client.HSet(ctx, "hash", &set{
+ Set1: "val1",
+ Set2: 1024,
+ Set3: 2 * time.Millisecond,
+ Set4: nil,
+ Set5: map[string]interface{}{"k1": 1},
+ })
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(4)))
+
+ hMGet := client.HMGet(ctx, "hash", "set1", "set2", "set3", "set4", "set5", "set6")
+ Expect(hMGet.Err()).NotTo(HaveOccurred())
+ Expect(hMGet.Val()).To(Equal([]interface{}{
+ "val1",
+ "1024",
+ strconv.Itoa(int(2 * time.Millisecond.Nanoseconds())),
+ "",
+ nil,
+ nil,
+ }))
+
+ hSet = client.HSet(ctx, "hash2", &set{
+ Set1: "val2",
+ Set6: "val",
+ })
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(int64(5)))
+
+ hMGet = client.HMGet(ctx, "hash2", "set1", "set6")
+ Expect(hMGet.Err()).NotTo(HaveOccurred())
+ Expect(hMGet.Val()).To(Equal([]interface{}{
+ "val2",
+ "val",
+ }))
})
It("should HSetNX", func() {
@@ -1865,23 +2415,25 @@ var _ = Describe("Commands", func() {
err = client.HSet(ctx, "hash", "key2", "hello2").Err()
Expect(err).NotTo(HaveOccurred())
- v := client.HRandField(ctx, "hash", 1, false)
+ v := client.HRandField(ctx, "hash", 1)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(Or(Equal([]string{"key1"}), Equal([]string{"key2"})))
- v = client.HRandField(ctx, "hash", 0, false)
+ v = client.HRandField(ctx, "hash", 0)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(HaveLen(0))
- var slice []string
- err = client.HRandField(ctx, "hash", 1, true).ScanSlice(&slice)
+ kv, err := client.HRandFieldWithValues(ctx, "hash", 1).Result()
Expect(err).NotTo(HaveOccurred())
- Expect(slice).To(Or(Equal([]string{"key1", "hello1"}), Equal([]string{"key2", "hello2"})))
+ Expect(kv).To(Or(
+ Equal([]redis.KeyValue{{Key: "key1", Value: "hello1"}}),
+ Equal([]redis.KeyValue{{Key: "key2", Value: "hello2"}}),
+ ))
})
})
Describe("hyperloglog", func() {
- It("should PFMerge", func() {
+ It("should PFMerge", Label("NonRedisEnterprise"), func() {
pfAdd := client.PFAdd(ctx, "hll1", "1", "2", "3", "4", "5")
Expect(pfAdd.Err()).NotTo(HaveOccurred())
@@ -1906,7 +2458,7 @@ var _ = Describe("Commands", func() {
})
Describe("lists", func() {
- It("should BLPop", func() {
+ It("should BLPop", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "list1", "a", "b", "c")
Expect(rPush.Err()).NotTo(HaveOccurred())
@@ -1960,7 +2512,7 @@ var _ = Describe("Commands", func() {
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
- It("should BRPop", func() {
+ It("should BRPop", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "list1", "a", "b", "c")
Expect(rPush.Err()).NotTo(HaveOccurred())
@@ -2002,7 +2554,7 @@ var _ = Describe("Commands", func() {
}
})
- It("should BRPopLPush", func() {
+ It("should BRPopLPush", Label("NonRedisEnterprise"), func() {
_, err := client.BRPopLPush(ctx, "list1", "list2", time.Second).Result()
Expect(err).To(Equal(redis.Nil))
@@ -2014,6 +2566,86 @@ var _ = Describe("Commands", func() {
Expect(v).To(Equal("c"))
})
+ It("should LCS", Label("NonRedisEnterprise"), func() {
+ err := client.MSet(ctx, "key1", "ohmytext", "key2", "mynewtext").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ lcs, err := client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ }).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal("mytext"))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "nonexistent_key1",
+ Key2: "key2",
+ }).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ Len: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+ Expect(lcs.Len).To(Equal(int64(6)))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ Idx: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+ Expect(lcs.Len).To(Equal(int64(6)))
+ Expect(lcs.Matches).To(Equal([]redis.LCSMatchedPosition{
+ {
+ Key1: redis.LCSPosition{Start: 4, End: 7},
+ Key2: redis.LCSPosition{Start: 5, End: 8},
+ MatchLen: 0,
+ },
+ {
+ Key1: redis.LCSPosition{Start: 2, End: 3},
+ Key2: redis.LCSPosition{Start: 0, End: 1},
+ MatchLen: 0,
+ },
+ }))
+
+ lcs, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "key1",
+ Key2: "key2",
+ Idx: true,
+ MinMatchLen: 3,
+ WithMatchLen: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lcs.MatchString).To(Equal(""))
+ Expect(lcs.Len).To(Equal(int64(6)))
+ Expect(lcs.Matches).To(Equal([]redis.LCSMatchedPosition{
+ {
+ Key1: redis.LCSPosition{Start: 4, End: 7},
+ Key2: redis.LCSPosition{Start: 5, End: 8},
+ MatchLen: 4,
+ },
+ }))
+
+ _, err = client.Set(ctx, "keywithstringvalue", "golang", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.LPush(ctx, "keywithnonstringvalue", "somevalue").Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.LCS(ctx, &redis.LCSQuery{
+ Key1: "keywithstringvalue",
+ Key2: "keywithnonstringvalue",
+ }).Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Equal("ERR The specified keys must contain string values"))
+ })
+
It("should LIndex", func() {
lPush := client.LPush(ctx, "list", "World")
Expect(lPush.Err()).NotTo(HaveOccurred())
@@ -2048,6 +2680,120 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{"Hello", "There", "World"}))
})
+ It("should LMPop", Label("NonRedisEnterprise"), func() {
+ err := client.LPush(ctx, "list1", "one", "two", "three", "four", "five").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.LPush(ctx, "list2", "a", "b", "c", "d", "e").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, val, err := client.LMPop(ctx, "left", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"five", "four", "three"}))
+
+ key, val, err = client.LMPop(ctx, "right", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"one", "two"}))
+
+ key, val, err = client.LMPop(ctx, "left", 1, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"e"}))
+
+ key, val, err = client.LMPop(ctx, "right", 10, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"a", "b", "c", "d"}))
+
+ err = client.LMPop(ctx, "left", 10, "list1", "list2").Err()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.Set(ctx, "list3", 1024, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.LMPop(ctx, "left", 10, "list1", "list2", "list3").Err()
+ Expect(err.Error()).To(Equal("WRONGTYPE Operation against a key holding the wrong kind of value"))
+
+ err = client.LMPop(ctx, "right", 0, "list1", "list2").Err()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("should BLMPop", Label("NonRedisEnterprise"), func() {
+ err := client.LPush(ctx, "list1", "one", "two", "three", "four", "five").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.LPush(ctx, "list2", "a", "b", "c", "d", "e").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, val, err := client.BLMPop(ctx, 0, "left", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"five", "four", "three"}))
+
+ key, val, err = client.BLMPop(ctx, 0, "right", 3, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list1"))
+ Expect(val).To(Equal([]string{"one", "two"}))
+
+ key, val, err = client.BLMPop(ctx, 0, "left", 1, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"e"}))
+
+ key, val, err = client.BLMPop(ctx, 0, "right", 10, "list1", "list2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list2"))
+ Expect(val).To(Equal([]string{"a", "b", "c", "d"}))
+ })
+
+ It("should BLMPopBlocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ key, val, err := client.BLMPop(ctx, 0, "left", 1, "list_list").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list_list"))
+ Expect(val).To(Equal([]string{"a"}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BLMPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ _, err := client.LPush(ctx, "list_list", "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BLMPop is still blocked")
+ }
+ })
+
+ It("should BLMPop timeout", func() {
+ _, val, err := client.BLMPop(ctx, time.Second, "left", 1, "list1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
It("should LLen", func() {
lPush := client.LPush(ctx, "list", "World")
Expect(lPush.Err()).NotTo(HaveOccurred())
@@ -2308,7 +3054,7 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
})
- It("should RPopLPush", func() {
+ It("should RPopLPush", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "list", "one")
Expect(rPush.Err()).NotTo(HaveOccurred())
rPush = client.RPush(ctx, "list", "two")
@@ -2377,7 +3123,7 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{}))
})
- It("should LMove", func() {
+ It("should LMove", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "lmove1", "ichi")
Expect(rPush.Err()).NotTo(HaveOccurred())
Expect(rPush.Val()).To(Equal(int64(1)))
@@ -2399,7 +3145,7 @@ var _ = Describe("Commands", func() {
Expect(lRange.Val()).To(Equal([]string{"san"}))
})
- It("should BLMove", func() {
+ It("should BLMove", Label("NonRedisEnterprise"), func() {
rPush := client.RPush(ctx, "blmove1", "ichi")
Expect(rPush.Err()).NotTo(HaveOccurred())
Expect(rPush.Val()).To(Equal(int64(1)))
@@ -2466,7 +3212,7 @@ var _ = Describe("Commands", func() {
Expect(sCard.Val()).To(Equal(int64(2)))
})
- It("should SDiff", func() {
+ It("should SDiff", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2486,7 +3232,7 @@ var _ = Describe("Commands", func() {
Expect(sDiff.Val()).To(ConsistOf([]string{"a", "b"}))
})
- It("should SDiffStore", func() {
+ It("should SDiffStore", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2510,7 +3256,7 @@ var _ = Describe("Commands", func() {
Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"}))
})
- It("should SInter", func() {
+ It("should SInter", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2530,7 +3276,37 @@ var _ = Describe("Commands", func() {
Expect(sInter.Val()).To(Equal([]string{"c"}))
})
- It("should SInterStore", func() {
+ It("should SInterCard", Label("NonRedisEnterprise"), func() {
+ sAdd := client.SAdd(ctx, "set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd(ctx, "set2", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd(ctx, "set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ // limit 0 means no limit,see https://redis.io/commands/sintercard/ for more details
+ sInterCard := client.SInterCard(ctx, 0, "set1", "set2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+
+ sInterCard = client.SInterCard(ctx, 1, "set1", "set2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(1)))
+
+ sInterCard = client.SInterCard(ctx, 3, "set1", "set2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should SInterStore", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2598,7 +3374,7 @@ var _ = Describe("Commands", func() {
Expect(sMembersMap.Val()).To(Equal(map[string]struct{}{"Hello": {}, "World": {}}))
})
- It("should SMove", func() {
+ It("should SMove", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "one")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "two")
@@ -2706,7 +3482,7 @@ var _ = Describe("Commands", func() {
Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
})
- It("should SUnion", func() {
+ It("should SUnion", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2726,7 +3502,7 @@ var _ = Describe("Commands", func() {
Expect(sUnion.Val()).To(HaveLen(5))
})
- It("should SUnionStore", func() {
+ It("should SUnionStore", Label("NonRedisEnterprise"), func() {
sAdd := client.SAdd(ctx, "set1", "a")
Expect(sAdd.Err()).NotTo(HaveOccurred())
sAdd = client.SAdd(ctx, "set1", "b")
@@ -2752,18 +3528,18 @@ var _ = Describe("Commands", func() {
})
Describe("sorted sets", func() {
- It("should BZPopMax", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{
+ It("should BZPopMax", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -2807,7 +3583,7 @@ var _ = Describe("Commands", func() {
// ok
}
- zAdd := client.ZAdd(ctx, "zset", &redis.Z{
+ zAdd := client.ZAdd(ctx, "zset", redis.Z{
Member: "a",
Score: 1,
})
@@ -2834,18 +3610,18 @@ var _ = Describe("Commands", func() {
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
- It("should BZPopMin", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{
+ It("should BZPopMin", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -2889,7 +3665,7 @@ var _ = Describe("Commands", func() {
// ok
}
- zAdd := client.ZAdd(ctx, "zset", &redis.Z{
+ zAdd := client.ZAdd(ctx, "zset", redis.Z{
Member: "a",
Score: 1,
})
@@ -2917,28 +3693,28 @@ var _ = Describe("Commands", func() {
})
It("should ZAdd", func() {
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "uno",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "two",
}).Result()
@@ -2960,28 +3736,28 @@ var _ = Describe("Commands", func() {
})
It("should ZAdd bytes", func() {
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: []byte("one"),
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: []byte("uno"),
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: []byte("two"),
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: []byte("two"),
}).Result()
@@ -3002,7 +3778,7 @@ var _ = Describe("Commands", func() {
}}))
})
- It("should ZAddArgs", func() {
+ It("should ZAddArgsGTAndLT", func() {
// Test only the GT+LT options.
added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
GT: true,
@@ -3038,8 +3814,78 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
})
- It("should ZAddNX", func() {
- added, err := client.ZAddNX(ctx, "zset", &redis.Z{
+ It("should ZAddArgsLT", func() {
+ added, err := client.ZAddLT(ctx, "zset", redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddLT(ctx, "zset", redis.Z{
+ Score: 3,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddLT(ctx, "zset", redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
+ })
+
+ It("should ZAddArgsGT", func() {
+ added, err := client.ZAddGT(ctx, "zset", redis.Z{
+ Score: 2,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
+
+ added, err = client.ZAddGT(ctx, "zset", redis.Z{
+ Score: 3,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "one"}}))
+
+ added, err = client.ZAddGT(ctx, "zset", redis.Z{
+ Score: 1,
+ Member: "one",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{Score: 3, Member: "one"}}))
+ })
+
+ It("should ZAddArgsNX", func() {
+ added, err := client.ZAddNX(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
@@ -3050,7 +3896,7 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- added, err = client.ZAddNX(ctx, "zset", &redis.Z{
+ added, err = client.ZAddNX(ctx, "zset", redis.Z{
Score: 2,
Member: "one",
}).Result()
@@ -3062,8 +3908,8 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
})
- It("should ZAddXX", func() {
- added, err := client.ZAddXX(ctx, "zset", &redis.Z{
+ It("should ZAddArgsXX", func() {
+ added, err := client.ZAddXX(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
@@ -3074,14 +3920,14 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(BeEmpty())
- added, err = client.ZAdd(ctx, "zset", &redis.Z{
+ added, err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- added, err = client.ZAddXX(ctx, "zset", &redis.Z{
+ added, err = client.ZAddXX(ctx, "zset", redis.Z{
Score: 2,
Member: "one",
}).Result()
@@ -3093,28 +3939,33 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZAddCh", func() {
- changed, err := client.ZAddCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsCh", func() {
+ changed, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(1)))
- changed, err = client.ZAddCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ changed, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(0)))
})
- // TODO: remove in v9.
- It("should ZAddNXCh", func() {
- changed, err := client.ZAddNXCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsNXCh", func() {
+ changed, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(1)))
@@ -3123,9 +3974,12 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- changed, err = client.ZAddNXCh(ctx, "zset", &redis.Z{
- Score: 2,
- Member: "one",
+ changed, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 2, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(0)))
@@ -3138,11 +3992,13 @@ var _ = Describe("Commands", func() {
}}))
})
- // TODO: remove in v9.
- It("should ZAddXXCh", func() {
- changed, err := client.ZAddXXCh(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsXXCh", func() {
+ changed, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(0)))
@@ -3151,16 +4007,19 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(BeEmpty())
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- changed, err = client.ZAddXXCh(ctx, "zset", &redis.Z{
- Score: 2,
- Member: "one",
+ changed, err = client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Ch: true,
+ Members: []redis.Z{
+ {Score: 2, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(changed).To(Equal(int64(1)))
@@ -3170,11 +4029,11 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZIncr", func() {
- score, err := client.ZIncr(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsIncr", func() {
+ score, err := client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(1)))
@@ -3183,9 +4042,10 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- score, err = client.ZIncr(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ score, err = client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(2)))
@@ -3195,11 +4055,12 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 2, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZIncrNX", func() {
- score, err := client.ZIncrNX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsIncrNX", func() {
+ score, err := client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(1)))
@@ -3208,9 +4069,11 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
- score, err = client.ZIncrNX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ score, err = client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ NX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).To(Equal(redis.Nil))
Expect(score).To(Equal(float64(0)))
@@ -3220,11 +4083,12 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{{Score: 1, Member: "one"}}))
})
- // TODO: remove in v9.
- It("should ZIncrXX", func() {
- score, err := client.ZIncrXX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ It("should ZAddArgsIncrXX", func() {
+ score, err := client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).To(Equal(redis.Nil))
Expect(score).To(Equal(float64(0)))
@@ -3233,16 +4097,18 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
Expect(vals).To(BeEmpty())
- added, err := client.ZAdd(ctx, "zset", &redis.Z{
+ added, err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(added).To(Equal(int64(1)))
- score, err = client.ZIncrXX(ctx, "zset", &redis.Z{
- Score: 1,
- Member: "one",
+ score, err = client.ZAddArgsIncr(ctx, "zset", redis.ZAddArgs{
+ XX: true,
+ Members: []redis.Z{
+ {Score: 1, Member: "one"},
+ },
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(score).To(Equal(float64(2)))
@@ -3253,12 +4119,12 @@ var _ = Describe("Commands", func() {
})
It("should ZCard", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3270,17 +4136,17 @@ var _ = Describe("Commands", func() {
})
It("should ZCount", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3300,12 +4166,12 @@ var _ = Describe("Commands", func() {
})
It("should ZIncrBy", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3326,23 +4192,23 @@ var _ = Describe("Commands", func() {
}}))
})
- It("should ZInterStore", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{
+ It("should ZInterStore", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{
+ err = client.ZAdd(ctx, "zset1", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset3", &redis.Z{Score: 3, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset3", redis.Z{Score: 3, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
n, err := client.ZInterStore(ctx, "out", &redis.ZStore{
@@ -3363,17 +4229,218 @@ var _ = Describe("Commands", func() {
}}))
})
+ It("should ZMPop", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err := client.ZMPop(ctx, "min", 1, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ _, _, err = client.ZMPop(ctx, "min", 1, "nosuchkey").Result()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err = client.ZMPop(ctx, "min", 1, "myzset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ key, elems, err = client.ZMPop(ctx, "max", 10, "myzset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }}))
+
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 4, Member: "four"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 5, Member: "five"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 6, Member: "six"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err = client.ZMPop(ctx, "min", 10, "myzset", "myzset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset2"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 4,
+ Member: "four",
+ }, {
+ Score: 5,
+ Member: "five",
+ }, {
+ Score: 6,
+ Member: "six",
+ }}))
+ })
+
+ It("should BZMPop", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err := client.BZMPop(ctx, 0, "min", 1, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+ key, elems, err = client.BZMPop(ctx, 0, "max", 1, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }}))
+ key, elems, err = client.BZMPop(ctx, 0, "min", 10, "zset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 2,
+ Member: "two",
+ }}))
+
+ key, elems, err = client.BZMPop(ctx, 0, "max", 10, "zset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("zset2"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 3,
+ Member: "three",
+ }, {
+ Score: 2,
+ Member: "two",
+ }, {
+ Score: 1,
+ Member: "one",
+ }}))
+
+ err = client.ZAdd(ctx, "myzset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ key, elems, err = client.BZMPop(ctx, 0, "min", 10, "myzset").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 4, Member: "four"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "myzset2", redis.Z{Score: 5, Member: "five"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ key, elems, err = client.BZMPop(ctx, 0, "min", 10, "myzset", "myzset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("myzset2"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 4,
+ Member: "four",
+ }, {
+ Score: 5,
+ Member: "five",
+ }}))
+ })
+
+ It("should BZMPopBlocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ key, elems, err := client.BZMPop(ctx, 0, "min", 1, "list_list").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(key).To(Equal("list_list"))
+ Expect(elems).To(Equal([]redis.Z{{
+ Score: 1,
+ Member: "one",
+ }}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BZMPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ err := client.ZAdd(ctx, "list_list", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BZMPop is still blocked")
+ }
+ })
+
+ It("should BZMPop timeout", func() {
+ _, val, err := client.BZMPop(ctx, time.Second, "min", 1, "list1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
It("should ZMScore", func() {
zmScore := client.ZMScore(ctx, "zset", "one", "three")
Expect(zmScore.Err()).NotTo(HaveOccurred())
Expect(zmScore.Val()).To(HaveLen(2))
Expect(zmScore.Val()[0]).To(Equal(float64(0)))
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zmScore = client.ZMScore(ctx, "zset", "one", "three")
@@ -3391,17 +4458,17 @@ var _ = Describe("Commands", func() {
})
It("should ZPopMax", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3415,7 +4482,7 @@ var _ = Describe("Commands", func() {
}}))
// adding back 3
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3431,12 +4498,12 @@ var _ = Describe("Commands", func() {
}}))
// adding back 2 & 3
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3456,17 +4523,17 @@ var _ = Describe("Commands", func() {
})
It("should ZPopMin", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 3,
Member: "three",
}).Err()
@@ -3480,7 +4547,7 @@ var _ = Describe("Commands", func() {
}}))
// adding back 1
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
@@ -3496,13 +4563,13 @@ var _ = Describe("Commands", func() {
}}))
// adding back 1 & 2
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 1,
Member: "one",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 2,
Member: "two",
}).Err()
@@ -3523,11 +4590,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRange", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRange := client.ZRange(ctx, "zset", 0, -1)
@@ -3544,11 +4611,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeWithScores", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result()
@@ -3642,11 +4709,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeByScore", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRangeByScore := client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{
@@ -3679,17 +4746,17 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeByLex", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{
+ err := client.ZAdd(ctx, "zset", redis.Z{
Score: 0,
Member: "a",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 0,
Member: "b",
}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{
+ err = client.ZAdd(ctx, "zset", redis.Z{
Score: 0,
Member: "c",
}).Err()
@@ -3725,11 +4792,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRangeByScoreWithScoresMap", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
@@ -3776,7 +4843,7 @@ var _ = Describe("Commands", func() {
Expect(vals).To(Equal([]redis.Z{}))
})
- It("should ZRangeStore", func() {
+ It("should ZRangeStore", Label("NonRedisEnterprise"), func() {
added, err := client.ZAddArgs(ctx, "zset", redis.ZAddArgs{
Members: []redis.Z{
{Score: 1, Member: "one"},
@@ -3806,11 +4873,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRank", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRank := client.ZRank(ctx, "zset", "three")
@@ -3822,12 +4889,37 @@ var _ = Describe("Commands", func() {
Expect(zRank.Val()).To(Equal(int64(0)))
})
+ It("should ZRankWithScore", func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRankWithScore := client.ZRankWithScore(ctx, "zset", "one")
+ Expect(zRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 0, Score: 1}))
+
+ zRankWithScore = client.ZRankWithScore(ctx, "zset", "two")
+ Expect(zRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 1, Score: 2}))
+
+ zRankWithScore = client.ZRankWithScore(ctx, "zset", "three")
+ Expect(zRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 2, Score: 3}))
+
+ zRankWithScore = client.ZRankWithScore(ctx, "zset", "four")
+ Expect(zRankWithScore.Err()).To(HaveOccurred())
+ Expect(zRankWithScore.Err()).To(Equal(redis.Nil))
+ })
+
It("should ZRem", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRem := client.ZRem(ctx, "zset", "two")
@@ -3846,11 +4938,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRemRangeByRank", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRemRangeByRank := client.ZRemRangeByRank(ctx, "zset", 0, 1)
@@ -3866,11 +4958,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRemRangeByScore", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRemRangeByScore := client.ZRemRangeByScore(ctx, "zset", "-inf", "(2")
@@ -3889,7 +4981,7 @@ var _ = Describe("Commands", func() {
})
It("should ZRemRangeByLex", func() {
- zz := []*redis.Z{
+ zz := []redis.Z{
{Score: 0, Member: "aaaa"},
{Score: 0, Member: "b"},
{Score: 0, Member: "c"},
@@ -3916,11 +5008,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRange", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRevRange := client.ZRevRange(ctx, "zset", 0, -1)
@@ -3937,11 +5029,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeWithScoresMap", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
val, err := client.ZRevRangeWithScores(ctx, "zset", 0, -1).Result()
@@ -3973,11 +5065,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByScore", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByScore(
@@ -3997,11 +5089,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByLex", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "a"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 0, Member: "a"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "b"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 0, Member: "b"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 0, Member: "c"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 0, Member: "c"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByLex(
@@ -4021,11 +5113,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByScoreWithScores", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByScoreWithScores(
@@ -4044,11 +5136,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRangeByScoreWithScoresMap", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
vals, err := client.ZRevRangeByScoreWithScores(
@@ -4077,11 +5169,11 @@ var _ = Describe("Commands", func() {
})
It("should ZRevRank", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
zRevRank := client.ZRevRank(ctx, "zset", "one")
@@ -4093,16 +5185,41 @@ var _ = Describe("Commands", func() {
Expect(zRevRank.Val()).To(Equal(int64(0)))
})
+ It("should ZRevRankWithScore", func() {
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ zRevRankWithScore := client.ZRevRankWithScore(ctx, "zset", "one")
+ Expect(zRevRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRevRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 2, Score: 1}))
+
+ zRevRankWithScore = client.ZRevRankWithScore(ctx, "zset", "two")
+ Expect(zRevRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRevRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 1, Score: 2}))
+
+ zRevRankWithScore = client.ZRevRankWithScore(ctx, "zset", "three")
+ Expect(zRevRankWithScore.Err()).NotTo(HaveOccurred())
+ Expect(zRevRankWithScore.Result()).To(Equal(redis.RankScore{Rank: 0, Score: 3}))
+
+ zRevRankWithScore = client.ZRevRankWithScore(ctx, "zset", "four")
+ Expect(zRevRankWithScore.Err()).To(HaveOccurred())
+ Expect(zRevRankWithScore.Err()).To(Equal(redis.Nil))
+ })
+
It("should ZScore", func() {
- zAdd := client.ZAdd(ctx, "zset", &redis.Z{Score: 1.001, Member: "one"})
+ zAdd := client.ZAdd(ctx, "zset", redis.Z{Score: 1.001, Member: "one"})
Expect(zAdd.Err()).NotTo(HaveOccurred())
zScore := client.ZScore(ctx, "zset", "one")
Expect(zScore.Err()).NotTo(HaveOccurred())
- Expect(zScore.Val()).To(Equal(float64(1.001)))
+ Expect(zScore.Val()).To(Equal(1.001))
})
- It("should ZUnion", func() {
+ It("should ZUnion", Label("NonRedisEnterprise"), func() {
err := client.ZAddArgs(ctx, "zset1", redis.ZAddArgs{
Members: []redis.Z{
{Score: 1, Member: "one"},
@@ -4141,17 +5258,17 @@ var _ = Describe("Commands", func() {
}))
})
- It("should ZUnionStore", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZUnionStore", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
n, err := client.ZUnionStore(ctx, "out", &redis.ZStore{
@@ -4176,33 +5293,35 @@ var _ = Describe("Commands", func() {
})
It("should ZRandMember", func() {
- err := client.ZAdd(ctx, "zset", &redis.Z{Score: 1, Member: "one"}).Err()
+ err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- v := client.ZRandMember(ctx, "zset", 1, false)
+ v := client.ZRandMember(ctx, "zset", 1)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(Or(Equal([]string{"one"}), Equal([]string{"two"})))
- v = client.ZRandMember(ctx, "zset", 0, false)
+ v = client.ZRandMember(ctx, "zset", 0)
Expect(v.Err()).NotTo(HaveOccurred())
Expect(v.Val()).To(HaveLen(0))
- var slice []string
- err = client.ZRandMember(ctx, "zset", 1, true).ScanSlice(&slice)
+ kv, err := client.ZRandMemberWithScores(ctx, "zset", 1).Result()
Expect(err).NotTo(HaveOccurred())
- Expect(slice).To(Or(Equal([]string{"one", "1"}), Equal([]string{"two", "2"})))
+ Expect(kv).To(Or(
+ Equal([]redis.Z{{Member: "one", Score: 1}}),
+ Equal([]redis.Z{{Member: "two", Score: 2}}),
+ ))
})
- It("should ZDiff", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZDiff", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZDiff(ctx, "zset1", "zset2").Result()
@@ -4210,14 +5329,14 @@ var _ = Describe("Commands", func() {
Expect(v).To(Equal([]string{"two", "three"}))
})
- It("should ZDiffWithScores", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZDiffWithScores", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZDiffWithScores(ctx, "zset1", "zset2").Result()
@@ -4234,16 +5353,16 @@ var _ = Describe("Commands", func() {
}))
})
- It("should ZInter", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZInter", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZInter(ctx, &redis.ZStore{
@@ -4253,16 +5372,42 @@ var _ = Describe("Commands", func() {
Expect(v).To(Equal([]string{"one", "two"}))
})
- It("should ZInterWithScores", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZInterCard", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // limit 0 means no limit
+ sInterCard := client.ZInterCard(ctx, 0, "zset1", "zset2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+
+ sInterCard = client.ZInterCard(ctx, 1, "zset1", "zset2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(1)))
+
+ sInterCard = client.ZInterCard(ctx, 3, "zset1", "zset2")
+ Expect(sInterCard.Err()).NotTo(HaveOccurred())
+ Expect(sInterCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should ZInterWithScores", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZInterWithScores(ctx, &redis.ZStore{
@@ -4283,16 +5428,16 @@ var _ = Describe("Commands", func() {
}))
})
- It("should ZDiffStore", func() {
- err := client.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "one"}).Err()
+ It("should ZDiffStore", Label("NonRedisEnterprise"), func() {
+ err := client.ZAdd(ctx, "zset1", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset1", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset1", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 1, Member: "one"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 1, Member: "one"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 2, Member: "two"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 2, Member: "two"}).Err()
Expect(err).NotTo(HaveOccurred())
- err = client.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "three"}).Err()
+ err = client.ZAdd(ctx, "zset2", redis.Z{Score: 3, Member: "three"}).Err()
Expect(err).NotTo(HaveOccurred())
v, err := client.ZDiffStore(ctx, "out1", "zset1", "zset2").Result()
Expect(err).NotTo(HaveOccurred())
@@ -4338,23 +5483,6 @@ var _ = Describe("Commands", func() {
Expect(id).To(Equal("3-0"))
})
- // TODO remove in v9.
- It("should XTrim", func() {
- n, err := client.XTrim(ctx, "stream", 0).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(3)))
- })
-
- // TODO remove in v9.
- It("should XTrimApprox", func() {
- n, err := client.XTrimApprox(ctx, "stream", 0).Result()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(3)))
- })
-
- // TODO XTrimMaxLenApprox/XTrimMinIDApprox There is a bug in the limit parameter.
- // TODO Don't test it for now.
- // TODO link: https://github.com/redis/redis/issues/9046
It("should XTrimMaxLen", func() {
n, err := client.XTrimMaxLen(ctx, "stream", 0).Result()
Expect(err).NotTo(HaveOccurred())
@@ -4396,9 +5524,6 @@ var _ = Describe("Commands", func() {
}))
})
- // TODO XAdd There is a bug in the limit parameter.
- // TODO Don't test it for now.
- // TODO link: https://github.com/redis/redis/issues/9046
It("should XAdd with MaxLen", func() {
id, err := client.XAdd(ctx, &redis.XAddArgs{
Stream: "stream",
@@ -4816,13 +5941,22 @@ var _ = Describe("Commands", func() {
res.RadixTreeNodes = 0
Expect(res).To(Equal(&redis.XInfoStream{
- Length: 3,
- RadixTreeKeys: 0,
- RadixTreeNodes: 0,
- Groups: 2,
- LastGeneratedID: "3-0",
- FirstEntry: redis.XMessage{ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
- LastEntry: redis.XMessage{ID: "3-0", Values: map[string]interface{}{"tres": "troix"}},
+ Length: 3,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ Groups: 2,
+ LastGeneratedID: "3-0",
+ MaxDeletedEntryID: "0-0",
+ EntriesAdded: 3,
+ FirstEntry: redis.XMessage{
+ ID: "1-0",
+ Values: map[string]interface{}{"uno": "un"},
+ },
+ LastEntry: redis.XMessage{
+ ID: "3-0",
+ Values: map[string]interface{}{"tres": "troix"},
+ },
+ RecordedFirstEntryID: "1-0",
}))
// stream is empty
@@ -4836,13 +5970,16 @@ var _ = Describe("Commands", func() {
res.RadixTreeNodes = 0
Expect(res).To(Equal(&redis.XInfoStream{
- Length: 0,
- RadixTreeKeys: 0,
- RadixTreeNodes: 0,
- Groups: 2,
- LastGeneratedID: "3-0",
- FirstEntry: redis.XMessage{},
- LastEntry: redis.XMessage{},
+ Length: 0,
+ RadixTreeKeys: 0,
+ RadixTreeNodes: 0,
+ Groups: 2,
+ LastGeneratedID: "3-0",
+ MaxDeletedEntryID: "3-0",
+ EntriesAdded: 3,
+ FirstEntry: redis.XMessage{},
+ LastEntry: redis.XMessage{},
+ RecordedFirstEntryID: "0-0",
}))
})
@@ -4862,7 +5999,9 @@ var _ = Describe("Commands", func() {
}
for k3, c := range g.Consumers {
Expect(now.Sub(c.SeenTime)).To(BeNumerically("<=", maxElapsed))
+ Expect(now.Sub(c.ActiveTime)).To(BeNumerically("<=", maxElapsed))
res.Groups[k].Consumers[k3].SeenTime = time.Time{}
+ res.Groups[k].Consumers[k3].ActiveTime = time.Time{}
for k4, p := range c.Pending {
Expect(now.Sub(p.DeliveryTime)).To(BeNumerically("<=", maxElapsed))
@@ -4871,114 +6010,104 @@ var _ = Describe("Commands", func() {
}
}
- Expect(res).To(Equal(&redis.XInfoStreamFull{
- Length: 3,
- RadixTreeKeys: 0,
- RadixTreeNodes: 0,
- LastGeneratedID: "3-0",
- Entries: []redis.XMessage{
- {ID: "1-0", Values: map[string]interface{}{"uno": "un"}},
- {ID: "2-0", Values: map[string]interface{}{"dos": "deux"}},
- },
- Groups: []redis.XInfoStreamGroup{
- {
- Name: "group1",
- LastDeliveredID: "3-0",
- PelCount: 3,
- Pending: []redis.XInfoStreamGroupPending{
- {
- ID: "1-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "2-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
+ Expect(res.Groups).To(Equal([]redis.XInfoStreamGroup{
+ {
+ Name: "group1",
+ LastDeliveredID: "3-0",
+ EntriesRead: 3,
+ Lag: 0,
+ PelCount: 3,
+ Pending: []redis.XInfoStreamGroupPending{
+ {ID: "1-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "2-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ },
+ Consumers: []redis.XInfoStreamConsumer{
+ {
+ Name: "consumer1",
+ SeenTime: time.Time{},
+ ActiveTime: time.Time{},
+ PelCount: 2,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {ID: "1-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "2-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
},
},
- Consumers: []redis.XInfoStreamConsumer{
- {
- Name: "consumer1",
- SeenTime: time.Time{},
- PelCount: 2,
- Pending: []redis.XInfoStreamConsumerPending{
- {
- ID: "1-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "2-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
- },
- {
- Name: "consumer2",
- SeenTime: time.Time{},
- PelCount: 1,
- Pending: []redis.XInfoStreamConsumerPending{
- {
- ID: "3-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
+ {
+ Name: "consumer2",
+ SeenTime: time.Time{},
+ ActiveTime: time.Time{},
+ PelCount: 1,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {ID: "3-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
},
},
},
- {
- Name: "group2",
- LastDeliveredID: "3-0",
- PelCount: 2,
- Pending: []redis.XInfoStreamGroupPending{
- {
- ID: "2-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "3-0",
- Consumer: "consumer1",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
- Consumers: []redis.XInfoStreamConsumer{
- {
- Name: "consumer1",
- SeenTime: time.Time{},
- PelCount: 2,
- Pending: []redis.XInfoStreamConsumerPending{
- {
- ID: "2-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- {
- ID: "3-0",
- DeliveryTime: time.Time{},
- DeliveryCount: 1,
- },
- },
+ },
+ {
+ Name: "group2",
+ LastDeliveredID: "3-0",
+ EntriesRead: 3,
+ Lag: 0,
+ PelCount: 2,
+ Pending: []redis.XInfoStreamGroupPending{
+ {ID: "2-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "3-0", Consumer: "consumer1", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ },
+ Consumers: []redis.XInfoStreamConsumer{
+ {
+ Name: "consumer1",
+ SeenTime: time.Time{},
+ ActiveTime: time.Time{},
+ PelCount: 2,
+ Pending: []redis.XInfoStreamConsumerPending{
+ {ID: "2-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
+ {ID: "3-0", DeliveryTime: time.Time{}, DeliveryCount: 1},
},
},
},
},
}))
+
+ // entries-read = nil
+ Expect(client.Del(ctx, "xinfo-stream-full-stream").Err()).NotTo(HaveOccurred())
+ id, err := client.XAdd(ctx, &redis.XAddArgs{
+ Stream: "xinfo-stream-full-stream",
+ ID: "*",
+ Values: []any{"k1", "v1"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.XGroupCreateMkStream(ctx, "xinfo-stream-full-stream", "xinfo-stream-full-group", "0").Err()).NotTo(HaveOccurred())
+ res, err = client.XInfoStreamFull(ctx, "xinfo-stream-full-stream", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(&redis.XInfoStreamFull{
+ Length: 1,
+ RadixTreeKeys: 1,
+ RadixTreeNodes: 2,
+ LastGeneratedID: id,
+ MaxDeletedEntryID: "0-0",
+ EntriesAdded: 1,
+ Entries: []redis.XMessage{{ID: id, Values: map[string]any{"k1": "v1"}}},
+ Groups: []redis.XInfoStreamGroup{
+ {
+ Name: "xinfo-stream-full-group",
+ LastDeliveredID: "0-0",
+ EntriesRead: 0,
+ Lag: 1,
+ PelCount: 0,
+ Pending: []redis.XInfoStreamGroupPending{},
+ Consumers: []redis.XInfoStreamConsumer{},
+ },
+ },
+ RecordedFirstEntryID: id,
+ }))
})
It("should XINFO GROUPS", func() {
res, err := client.XInfoGroups(ctx, "stream").Result()
Expect(err).NotTo(HaveOccurred())
Expect(res).To(Equal([]redis.XInfoGroup{
- {Name: "group1", Consumers: 2, Pending: 3, LastDeliveredID: "3-0"},
- {Name: "group2", Consumers: 1, Pending: 2, LastDeliveredID: "3-0"},
+ {Name: "group1", Consumers: 2, Pending: 3, LastDeliveredID: "3-0", EntriesRead: 3},
+ {Name: "group2", Consumers: 1, Pending: 2, LastDeliveredID: "3-0", EntriesRead: 3},
}))
})
@@ -4987,10 +6116,12 @@ var _ = Describe("Commands", func() {
Expect(err).NotTo(HaveOccurred())
for i := range res {
res[i].Idle = 0
+ res[i].Inactive = 0
}
+
Expect(res).To(Equal([]redis.XInfoConsumer{
- {Name: "consumer1", Pending: 2, Idle: 0},
- {Name: "consumer2", Pending: 1, Idle: 0},
+ {Name: "consumer1", Pending: 2, Idle: 0, Inactive: 0},
+ {Name: "consumer2", Pending: 1, Idle: 0, Inactive: 0},
}))
})
})
@@ -5028,7 +6159,7 @@ var _ = Describe("Commands", func() {
Expect(res[1].Name).To(Equal("Catania"))
})
- It("should geo radius and store the result", func() {
+ It("should geo radius and store the result", Label("NonRedisEnterprise"), func() {
n, err := client.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
Radius: 200,
Store: "result",
@@ -5048,7 +6179,7 @@ var _ = Describe("Commands", func() {
}))
})
- It("should geo radius and store dist", func() {
+ It("should geo radius and store dist", Label("NonRedisEnterprise"), func() {
n, err := client.GeoRadiusStore(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
Radius: 200,
StoreDist: "result",
@@ -5330,7 +6461,7 @@ var _ = Describe("Commands", func() {
}))
})
- It("should geo search store", func() {
+ It("should geo search store", Label("NonRedisEnterprise"), func() {
q := &redis.GeoSearchStoreQuery{
GeoSearchQuery: redis.GeoSearchQuery{
Longitude: 15,
@@ -5401,7 +6532,7 @@ var _ = Describe("Commands", func() {
{nil, "", nil},
{"hello", "hello", new(string)},
{[]byte("hello"), "hello", new([]byte)},
- {int(1), "1", new(int)},
+ {1, "1", new(int)},
{int8(1), "1", new(int8)},
{int16(1), "1", new(int16)},
{int32(1), "1", new(int32)},
@@ -5412,7 +6543,7 @@ var _ = Describe("Commands", func() {
{uint32(1), "1", new(uint32)},
{uint64(1), "1", new(uint64)},
{float32(1.0), "1", new(float32)},
- {float64(1.0), "1", new(float64)},
+ {1.0, "1", new(float64)},
{true, "1", new(bool)},
{false, "0", new(bool)},
}
@@ -5481,13 +6612,374 @@ var _ = Describe("Commands", func() {
})
})
+ Describe("EvalRO", func() {
+ It("returns keys and values", func() {
+ vals, err := client.EvalRO(
+ ctx,
+ "return {KEYS[1],ARGV[1]}",
+ []string{"key"},
+ "hello",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{"key", "hello"}))
+ })
+
+ It("returns all values after an error", func() {
+ vals, err := client.EvalRO(
+ ctx,
+ `return {12, {err="error"}, "abc"}`,
+ nil,
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{int64(12), proto.RedisError("error"), "abc"}))
+ })
+ })
+
+ Describe("Functions", func() {
+ var (
+ q redis.FunctionListQuery
+ lib1Code string
+ lib2Code string
+ lib1 redis.Library
+ lib2 redis.Library
+ )
+
+ BeforeEach(func() {
+ flush := client.FunctionFlush(ctx)
+ Expect(flush.Err()).NotTo(HaveOccurred())
+
+ lib1 = redis.Library{
+ Name: "mylib1",
+ Engine: "LUA",
+ Functions: []redis.Function{
+ {
+ Name: "lib1_func1",
+ Description: "This is the func-1 of lib 1",
+ Flags: []string{"allow-oom", "allow-stale"},
+ },
+ },
+ Code: `#!lua name=%s
+
+ local function f1(keys, args)
+ local hash = keys[1] -- Get the key name
+ local time = redis.call('TIME')[1] -- Get the current time from the Redis server
+
+ -- Add the current timestamp to the arguments that the user passed to the function, stored in args
+ table.insert(args, '_updated_at')
+ table.insert(args, time)
+
+ -- Run HSET with the updated argument list
+ return redis.call('HSET', hash, unpack(args))
+ end
+
+ redis.register_function{
+ function_name='%s',
+ description ='%s',
+ callback=f1,
+ flags={'%s', '%s'}
+ }`,
+ }
+
+ lib2 = redis.Library{
+ Name: "mylib2",
+ Engine: "LUA",
+ Functions: []redis.Function{
+ {
+ Name: "lib2_func1",
+ Flags: []string{},
+ },
+ {
+ Name: "lib2_func2",
+ Description: "This is the func-2 of lib 2",
+ Flags: []string{"no-writes"},
+ },
+ },
+ Code: `#!lua name=%s
+
+ local function f1(keys, args)
+ return 'Function 1'
+ end
+
+ local function f2(keys, args)
+ return 'Function 2'
+ end
+
+ redis.register_function('%s', f1)
+ redis.register_function{
+ function_name='%s',
+ description ='%s',
+ callback=f2,
+ flags={'%s'}
+ }`,
+ }
+
+ lib1Code = fmt.Sprintf(lib1.Code, lib1.Name, lib1.Functions[0].Name,
+ lib1.Functions[0].Description, lib1.Functions[0].Flags[0], lib1.Functions[0].Flags[1])
+ lib2Code = fmt.Sprintf(lib2.Code, lib2.Name, lib2.Functions[0].Name,
+ lib2.Functions[1].Name, lib2.Functions[1].Description, lib2.Functions[1].Flags[0])
+
+ q = redis.FunctionListQuery{}
+ })
+
+ It("Loads a new library", Label("NonRedisEnterprise"), func() {
+ functionLoad := client.FunctionLoad(ctx, lib1Code)
+ Expect(functionLoad.Err()).NotTo(HaveOccurred())
+ Expect(functionLoad.Val()).To(Equal(lib1.Name))
+
+ functionList := client.FunctionList(ctx, q)
+ Expect(functionList.Err()).NotTo(HaveOccurred())
+ Expect(functionList.Val()).To(HaveLen(1))
+ })
+
+ It("Loads and replaces a new library", Label("NonRedisEnterprise"), func() {
+ // Load a library for the first time
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ newFuncName := "replaces_func_name"
+ newFuncDesc := "replaces_func_desc"
+ flag1, flag2 := "allow-stale", "no-cluster"
+ newCode := fmt.Sprintf(lib1.Code, lib1.Name, newFuncName, newFuncDesc, flag1, flag2)
+
+ // And then replace it
+ functionLoadReplace := client.FunctionLoadReplace(ctx, newCode)
+ Expect(functionLoadReplace.Err()).NotTo(HaveOccurred())
+ Expect(functionLoadReplace.Val()).To(Equal(lib1.Name))
+
+ lib, err := client.FunctionList(ctx, q).First()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lib.Functions).To(Equal([]redis.Function{
+ {
+ Name: newFuncName,
+ Description: newFuncDesc,
+ Flags: []string{flag1, flag2},
+ },
+ }))
+ })
+
+ It("Deletes a library", func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.FunctionDelete(ctx, lib1.Name).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: lib1.Name,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(0))
+ })
+
+ It("Flushes all libraries", func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.FunctionFlush(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.FunctionList(ctx, q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(0))
+ })
+
+ It("Flushes all libraries asynchronously", func() {
+ functionLoad := client.FunctionLoad(ctx, lib1Code)
+ Expect(functionLoad.Err()).NotTo(HaveOccurred())
+
+ // we only verify the command result.
+ functionFlush := client.FunctionFlushAsync(ctx)
+ Expect(functionFlush.Err()).NotTo(HaveOccurred())
+ })
+
+ It("Kills a running function", func() {
+ functionKill := client.FunctionKill(ctx)
+ Expect(functionKill.Err()).To(MatchError("NOTBUSY No scripts in execution right now."))
+
+ // Add test for a long-running function, once we make the test for `function stats` pass
+ })
+
+ It("Lists registered functions", Label("NonRedisEnterprise"), func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: "*",
+ WithCode: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(1))
+ Expect(val[0].Name).To(Equal(lib1.Name))
+ Expect(val[0].Engine).To(Equal(lib1.Engine))
+ Expect(val[0].Code).To(Equal(lib1Code))
+ Expect(val[0].Functions).Should(ConsistOf(lib1.Functions))
+
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err = client.FunctionList(ctx, redis.FunctionListQuery{
+ WithCode: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(HaveLen(2))
+
+ lib, err := client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: lib2.Name,
+ WithCode: false,
+ }).First()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(lib.Name).To(Equal(lib2.Name))
+ Expect(lib.Code).To(Equal(""))
+
+ _, err = client.FunctionList(ctx, redis.FunctionListQuery{
+ LibraryNamePattern: "non_lib",
+ WithCode: true,
+ }).First()
+ Expect(err).To(Equal(redis.Nil))
+ })
+
+ It("Dump and restores all libraries", Label("NonRedisEnterprise"), func() {
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ dump, err := client.FunctionDump(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dump).NotTo(BeEmpty())
+
+ err = client.FunctionRestore(ctx, dump).Err()
+ Expect(err).To(HaveOccurred())
+
+ err = client.FunctionFlush(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ list, err := client.FunctionList(ctx, q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(list).To(HaveLen(0))
+
+ err = client.FunctionRestore(ctx, dump).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ list, err = client.FunctionList(ctx, q).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(list).To(HaveLen(2))
+ })
+
+ It("Calls a function", func() {
+ lib1Code = fmt.Sprintf(lib1.Code, lib1.Name, lib1.Functions[0].Name,
+ lib1.Functions[0].Description, lib1.Functions[0].Flags[0], lib1.Functions[0].Flags[1])
+
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ x := client.FCall(ctx, lib1.Functions[0].Name, []string{"my_hash"}, "a", 1, "b", 2)
+ Expect(x.Err()).NotTo(HaveOccurred())
+ Expect(x.Int()).To(Equal(3))
+ })
+
+ It("Calls a function as read-only", func() {
+ lib1Code = fmt.Sprintf(lib1.Code, lib1.Name, lib1.Functions[0].Name,
+ lib1.Functions[0].Description, lib1.Functions[0].Flags[0], lib1.Functions[0].Flags[1])
+
+ err := client.FunctionLoad(ctx, lib1Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // This function doesn't have a "no-writes" flag
+ x := client.FCallRo(ctx, lib1.Functions[0].Name, []string{"my_hash"}, "a", 1, "b", 2)
+
+ Expect(x.Err()).To(HaveOccurred())
+
+ lib2Code = fmt.Sprintf(lib2.Code, lib2.Name, lib2.Functions[0].Name, lib2.Functions[1].Name,
+ lib2.Functions[1].Description, lib2.Functions[1].Flags[0])
+
+ // This function has a "no-writes" flag
+ err = client.FunctionLoad(ctx, lib2Code).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ x = client.FCallRo(ctx, lib2.Functions[1].Name, []string{})
+
+ Expect(x.Err()).NotTo(HaveOccurred())
+ Expect(x.Text()).To(Equal("Function 2"))
+ })
+
+ It("Shows function stats", func() {
+ defer client.FunctionKill(ctx)
+
+ // We can not run blocking commands in Redis functions, so we're using an infinite loop,
+ // but we're killing the function after calling FUNCTION STATS
+ lib := redis.Library{
+ Name: "mylib1",
+ Engine: "LUA",
+ Functions: []redis.Function{
+ {
+ Name: "lib1_func1",
+ Description: "This is the func-1 of lib 1",
+ Flags: []string{"no-writes"},
+ },
+ },
+ Code: `#!lua name=%s
+ local function f1(keys, args)
+ local i = 0
+ while true do
+ i = i + 1
+ end
+ end
+
+ redis.register_function{
+ function_name='%s',
+ description ='%s',
+ callback=f1,
+ flags={'%s'}
+ }`,
+ }
+ libCode := fmt.Sprintf(lib.Code, lib.Name, lib.Functions[0].Name,
+ lib.Functions[0].Description, lib.Functions[0].Flags[0])
+ err := client.FunctionLoad(ctx, libCode).Err()
+
+ Expect(err).NotTo(HaveOccurred())
+
+ r, err := client.FunctionStats(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(r.Engines)).To(Equal(1))
+ Expect(r.Running()).To(BeFalse())
+
+ started := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ client2 := redis.NewClient(redisOptions())
+
+ started <- true
+ _, err = client2.FCall(ctx, lib.Functions[0].Name, nil).Result()
+ Expect(err).To(HaveOccurred())
+ }()
+
+ <-started
+ time.Sleep(1 * time.Second)
+ r, err = client.FunctionStats(ctx).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(r.Engines)).To(Equal(1))
+ rs, isRunning := r.RunningScript()
+ Expect(isRunning).To(BeTrue())
+ Expect(rs.Name).To(Equal(lib.Functions[0].Name))
+ Expect(rs.Duration > 0).To(BeTrue())
+
+ close(started)
+ })
+ })
+
Describe("SlowLogGet", func() {
It("returns slow query result", func() {
const key = "slowlog-log-slower-than"
old := client.ConfigGet(ctx, key).Val()
client.ConfigSet(ctx, key, "0")
- defer client.ConfigSet(ctx, key, old[1].(string))
+ defer client.ConfigSet(ctx, key, old[key])
err := client.Do(ctx, "slowlog", "reset").Err()
Expect(err).NotTo(HaveOccurred())
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doc.go
index 5526253..5526253 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/doc.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doc.go
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md
new file mode 100644
index 0000000..2623c18
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/README.md
@@ -0,0 +1,22 @@
+# Command examples for redis.io
+
+These examples appear on the [Redis documentation](https://redis.io) site as part of the tabbed examples interface.
+
+## How to add examples
+
+- Create a Go test file with a meaningful name in the current folder.
+- Create a single method prefixed with `Example` and write your test in it.
+- Determine the id for the example you're creating and add it as the first line of the file: `// EXAMPLE: set_and_get`.
+- We're using the [Testable Examples](https://go.dev/blog/examples) feature of Go to test the desired output has been written to stdout.
+
+### Special markup
+
+See https://github.com/redis-stack/redis-stack-website#readme for more details.
+
+## How to test the examples
+
+- Start a Redis server locally on port 6379
+- CD into the `doctests` directory
+- Run `go test` to test all examples in the directory.
+- Run `go test filename.go` to test a single file
+
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go
new file mode 100644
index 0000000..1e69f4b
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/lpush_lrange_test.go
@@ -0,0 +1,48 @@
+// EXAMPLE: lpush_and_lrange
+// HIDE_START
+package example_commands_test
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient_LPush_and_lrange() {
+ ctx := context.Background()
+
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password docs
+ DB: 0, // use default DB
+ })
+
+ // HIDE_END
+
+ // REMOVE_START
+ errFlush := rdb.FlushDB(ctx).Err() // Clear the database before each test
+ if errFlush != nil {
+ panic(errFlush)
+ }
+ // REMOVE_END
+
+ listSize, err := rdb.LPush(ctx, "my_bikes", "bike:1", "bike:2").Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(listSize)
+
+ value, err := rdb.LRange(ctx, "my_bikes", 0, -1).Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(value)
+ // HIDE_START
+
+ // Output: 2
+ // [bike:2 bike:1]
+}
+
+// HIDE_END
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go
new file mode 100644
index 0000000..ab3a936
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/doctests/set_get_test.go
@@ -0,0 +1,48 @@
+// EXAMPLE: set_and_get
+// HIDE_START
+package example_commands_test
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient_Set_and_get() {
+ ctx := context.Background()
+
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password docs
+ DB: 0, // use default DB
+ })
+
+ // HIDE_END
+
+ // REMOVE_START
+ errFlush := rdb.FlushDB(ctx).Err() // Clear the database before each test
+ if errFlush != nil {
+ panic(errFlush)
+ }
+ // REMOVE_END
+
+ err := rdb.Set(ctx, "bike:1", "Process 134", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println("OK")
+
+ value, err := rdb.Get(ctx, "bike:1").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("The name of the bike is %s", value)
+ // HIDE_START
+
+ // Output: OK
+ // The name of the bike is Process 134
+}
+
+// HIDE_END
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/error.go
index 521594b..8a59913 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/error.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/error.go
@@ -2,17 +2,29 @@ package redis
import (
"context"
+ "errors"
"io"
"net"
"strings"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// ErrClosed performs any operation on the closed client will return this error.
var ErrClosed = pool.ErrClosed
+// HasErrorPrefix checks if the err is a Redis error and the message contains a prefix.
+func HasErrorPrefix(err error, prefix string) bool {
+ var rErr Error
+ if !errors.As(err, &rErr) {
+ return false
+ }
+ msg := rErr.Error()
+ msg = strings.TrimPrefix(msg, "ERR ") // KVRocks adds such prefix
+ return strings.HasPrefix(msg, prefix)
+}
+
type Error interface {
error
@@ -91,7 +103,7 @@ func isBadConn(err error, allowTimeout bool, addr string) bool {
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return !netErr.Temporary()
+ return false
}
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_instrumentation_test.go
index d66edce..a6069cf 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_instrumentation_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_instrumentation_test.go
@@ -3,32 +3,40 @@ package redis_test
import (
"context"
"fmt"
+ "net"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
type redisHook struct{}
var _ redis.Hook = redisHook{}
-func (redisHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- fmt.Printf("starting processing: <%s>\n", cmd)
- return ctx, nil
+func (redisHook) DialHook(hook redis.DialHook) redis.DialHook {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ fmt.Printf("dialing %s %s\n", network, addr)
+ conn, err := hook(ctx, network, addr)
+ fmt.Printf("finished dialing %s %s\n", network, addr)
+ return conn, err
+ }
}
-func (redisHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
- fmt.Printf("finished processing: <%s>\n", cmd)
- return nil
+func (redisHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ fmt.Printf("starting processing: <%s>\n", cmd)
+ err := hook(ctx, cmd)
+ fmt.Printf("finished processing: <%s>\n", cmd)
+ return err
+ }
}
-func (redisHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- fmt.Printf("pipeline starting processing: %v\n", cmds)
- return ctx, nil
-}
-
-func (redisHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
- fmt.Printf("pipeline finished processing: %v\n", cmds)
- return nil
+func (redisHook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ fmt.Printf("pipeline starting processing: %v\n", cmds)
+ err := hook(ctx, cmds)
+ fmt.Printf("pipeline finished processing: %v\n", cmds)
+ return err
+ }
}
func Example_instrumentation() {
@@ -39,6 +47,8 @@ func Example_instrumentation() {
rdb.Ping(ctx)
// Output: starting processing: <ping: >
+ // dialing tcp :6379
+ // finished dialing tcp :6379
// finished processing: <ping: PONG>
}
@@ -54,6 +64,8 @@ func ExamplePipeline_instrumentation() {
return nil
})
// Output: pipeline starting processing: [ping: ping: ]
+ // dialing tcp :6379
+ // finished dialing tcp :6379
// pipeline finished processing: [ping: PONG ping: PONG]
}
@@ -70,6 +82,8 @@ func ExampleClient_Watch_instrumentation() {
}, "foo")
// Output:
// starting processing: <watch foo: >
+ // dialing tcp :6379
+ // finished dialing tcp :6379
// finished processing: <watch foo: OK>
// starting processing: <ping: >
// finished processing: <ping: PONG>
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_test.go
index f015809..62aa8cb 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/example_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/example_test.go
@@ -7,7 +7,7 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var (
@@ -154,8 +154,8 @@ func ExampleClient() {
// missing_key does not exist
}
-func ExampleConn() {
- conn := rdb.Conn(context.Background())
+func ExampleConn_name() {
+ conn := rdb.Conn()
err := conn.ClientSetName(ctx, "foobar").Err()
if err != nil {
@@ -175,6 +175,28 @@ func ExampleConn() {
// Output: foobar
}
+func ExampleConn_client_setInfo_libraryVersion() {
+ conn := rdb.Conn()
+
+ err := conn.ClientSetInfo(ctx, redis.WithLibraryVersion("1.2.3")).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ // Open other connections.
+ for i := 0; i < 10; i++ {
+ go rdb.Ping(ctx)
+ }
+
+ s, err := conn.ClientInfo(ctx).Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(s.LibVer)
+ // Output: 1.2.3
+}
+
func ExampleClient_Set() {
// Last argument is expiration. Zero means the key has no
// expiration time.
@@ -190,8 +212,23 @@ func ExampleClient_Set() {
}
}
-func ExampleClient_SetEX() {
- err := rdb.SetEX(ctx, "key", "value", time.Hour).Err()
+func ExampleClient_SetEx() {
+ err := rdb.SetEx(ctx, "key", "value", time.Hour).Err()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleClient_HSet() {
+ // Set "redis" tag for hash key
+ type ExampleUser struct {
+ Name string `redis:"name"`
+ Age int `redis:"age"`
+ }
+
+ items := ExampleUser{"jane", 22}
+
+ err := rdb.HSet(ctx, "user:1", items).Err()
if err != nil {
panic(err)
}
@@ -212,7 +249,7 @@ func ExampleClient_BLPop() {
panic(err)
}
- // use `rdb.BLPop(0, "queue")` for infinite waiting time
+ // use `rdb.BLPop(ctx, 0, "queue")` for infinite waiting time
result, err := rdb.BLPop(ctx, 1*time.Second, "queue").Result()
if err != nil {
panic(err)
@@ -278,9 +315,38 @@ func ExampleClient_ScanType() {
// Output: found 33 keys
}
-// ExampleStringStringMapCmd_Scan shows how to scan the results of a map fetch
+// ExampleClient_ScanType_hashType uses the keyType "hash".
+func ExampleClient_ScanType_hashType() {
+ rdb.FlushDB(ctx)
+ for i := 0; i < 33; i++ {
+ err := rdb.HSet(context.TODO(), fmt.Sprintf("key%d", i), "value", "foo").Err()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ var allKeys []string
+ var cursor uint64
+ var err error
+
+ for {
+ var keysFromScan []string
+ keysFromScan, cursor, err = rdb.ScanType(context.TODO(), cursor, "key*", 10, "hash").Result()
+ if err != nil {
+ panic(err)
+ }
+ allKeys = append(allKeys, keysFromScan...)
+ if cursor == 0 {
+ break
+ }
+ }
+ fmt.Printf("%d keys ready for use", len(allKeys))
+ // Output: 33 keys ready for use
+}
+
+// ExampleMapStringStringCmd_Scan shows how to scan the results of a map fetch
// into a struct.
-func ExampleStringStringMapCmd_Scan() {
+func ExampleMapStringStringCmd_Scan() {
rdb.FlushDB(ctx)
err := rdb.HMSet(ctx, "map",
"name", "hello",
@@ -404,7 +470,7 @@ func ExampleClient_TxPipeline() {
}
func ExampleClient_Watch() {
- const maxRetries = 1000
+ const maxRetries = 10000
// Increment transactionally increments key using GET and SET commands.
increment := func(key string) error {
@@ -613,11 +679,16 @@ func ExampleNewUniversalClient_cluster() {
}
func ExampleClient_SlowLogGet() {
+ if RECluster {
+ // skip slowlog test for cluster
+ fmt.Println(2)
+ return
+ }
const key = "slowlog-log-slower-than"
old := rdb.ConfigGet(ctx, key).Val()
rdb.ConfigSet(ctx, key, "0")
- defer rdb.ConfigSet(ctx, key, old[1].(string))
+ defer rdb.ConfigSet(ctx, key, old[key])
if err := rdb.Do(ctx, "slowlog", "reset").Err(); err != nil {
panic(err)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/export_test.go
index 49c4b94..3f92983 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/export_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/export_test.go
@@ -6,9 +6,9 @@ import (
"net"
"strings"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
)
func (c *baseClient) Pool() pool.Pooler {
@@ -60,21 +60,21 @@ func (c *ClusterClient) SwapNodes(ctx context.Context, key string) error {
return nil
}
-func (state *clusterState) IsConsistent(ctx context.Context) bool {
- if len(state.Masters) < 3 {
+func (c *clusterState) IsConsistent(ctx context.Context) bool {
+ if len(c.Masters) < 3 {
return false
}
- for _, master := range state.Masters {
+ for _, master := range c.Masters {
s := master.Client.Info(ctx, "replication").Val()
if !strings.Contains(s, "role:master") {
return false
}
}
- if len(state.Slaves) < 3 {
+ if len(c.Slaves) < 3 {
return false
}
- for _, slave := range state.Slaves {
+ for _, slave := range c.Slaves {
s := slave.Client.Info(ctx, "replication").Val()
if !strings.Contains(s, "role:slave") {
return false
@@ -85,11 +85,20 @@ func (state *clusterState) IsConsistent(ctx context.Context) bool {
}
func GetSlavesAddrByName(ctx context.Context, c *SentinelClient, name string) []string {
- addrs, err := c.Slaves(ctx, name).Result()
+ addrs, err := c.Replicas(ctx, name).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
name, err)
return []string{}
}
- return parseSlaveAddrs(addrs, false)
+ return parseReplicaAddrs(addrs, false)
+}
+
+func (c *Ring) ShardByName(name string) *ringShard {
+ shard, _ := c.sharding.GetByName(name)
+ return shard
+}
+
+func (c *ModuleLoadexConfig) ToArgs() []interface{} {
+ return c.toArgs()
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/fuzz/fuzz.go
index 3225d24..d4b2ac4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/fuzz/fuzz.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/fuzz/fuzz.go
@@ -7,7 +7,7 @@ import (
"context"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var (
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go
new file mode 100644
index 0000000..e0d49a6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands.go
@@ -0,0 +1,149 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+type GearsCmdable interface {
+ TFunctionLoad(ctx context.Context, lib string) *StatusCmd
+ TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd
+ TFunctionDelete(ctx context.Context, libName string) *StatusCmd
+ TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd
+ TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd
+ TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd
+ TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd
+ TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd
+ TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd
+}
+
+type TFunctionLoadOptions struct {
+ Replace bool
+ Config string
+}
+
+type TFunctionListOptions struct {
+ Withcode bool
+ Verbose int
+ Library string
+}
+
+type TFCallOptions struct {
+ Keys []string
+ Arguments []string
+}
+
+// TFunctionLoad - load a new JavaScript library into Redis.
+// For more information - https://redis.io/commands/tfunction-load/
+func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd {
+ args := []interface{}{"TFUNCTION", "LOAD", lib}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd {
+ args := []interface{}{"TFUNCTION", "LOAD"}
+ if options != nil {
+ if options.Replace {
+ args = append(args, "REPLACE")
+ }
+ if options.Config != "" {
+ args = append(args, "CONFIG", options.Config)
+ }
+ }
+ args = append(args, lib)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFunctionDelete - delete a JavaScript library from Redis.
+// For more information - https://redis.io/commands/tfunction-delete/
+func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd {
+ args := []interface{}{"TFUNCTION", "DELETE", libName}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFunctionList - list the functions with additional information about each function.
+// For more information - https://redis.io/commands/tfunction-list/
+func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd {
+ args := []interface{}{"TFUNCTION", "LIST"}
+ cmd := NewMapStringInterfaceSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd {
+ args := []interface{}{"TFUNCTION", "LIST"}
+ if options != nil {
+ if options.Withcode {
+ args = append(args, "WITHCODE")
+ }
+ if options.Verbose != 0 {
+ v := strings.Repeat("v", options.Verbose)
+ args = append(args, v)
+ }
+ if options.Library != "" {
+ args = append(args, "LIBRARY", options.Library)
+ }
+ }
+ cmd := NewMapStringInterfaceSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFCall - invoke a function.
+// For more information - https://redis.io/commands/tfcall/
+func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
+ lf := libName + "." + funcName
+ args := []interface{}{"TFCALL", lf, numKeys}
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
+ lf := libName + "." + funcName
+ args := []interface{}{"TFCALL", lf, numKeys}
+ if options != nil {
+ for _, key := range options.Keys {
+ args = append(args, key)
+ }
+ for _, key := range options.Arguments {
+ args = append(args, key)
+ }
+ }
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine).
+// For more information - https://redis.io/commands/TFCallASYNC/
+func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd {
+ lf := fmt.Sprintf("%s.%s", libName, funcName)
+ args := []interface{}{"TFCALLASYNC", lf, numKeys}
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd {
+ lf := fmt.Sprintf("%s.%s", libName, funcName)
+ args := []interface{}{"TFCALLASYNC", lf, numKeys}
+ if options != nil {
+ for _, key := range options.Keys {
+ args = append(args, key)
+ }
+ for _, key := range options.Arguments {
+ args = append(args, key)
+ }
+ }
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go
new file mode 100644
index 0000000..b1117a4
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/gears_commands_test.go
@@ -0,0 +1,114 @@
+package redis_test
+
+import (
+ "context"
+ "fmt"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func libCode(libName string) string {
+ return fmt.Sprintf("#!js api_version=1.0 name=%s\n redis.registerFunction('foo', ()=>{{return 'bar'}})", libName)
+}
+
+func libCodeWithConfig(libName string) string {
+ lib := `#!js api_version=1.0 name=%s
+
+ var last_update_field_name = "__last_update__"
+
+ if (redis.config.last_update_field_name !== undefined) {
+ if (typeof redis.config.last_update_field_name != 'string') {
+ throw "last_update_field_name must be a string";
+ }
+ last_update_field_name = redis.config.last_update_field_name
+ }
+
+ redis.registerFunction("hset", function(client, key, field, val){
+ // get the current time in ms
+ var curr_time = client.call("time")[0];
+ return client.call('hset', key, field, val, last_update_field_name, curr_time);
+ });`
+ return fmt.Sprintf(lib, libName)
+}
+
+var _ = Describe("RedisGears commands", Label("gears"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ client.TFunctionDelete(ctx, "lib1")
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should TFunctionLoad, TFunctionLoadArgs and TFunctionDelete ", Label("gears", "tfunctionload"), func() {
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ opt := &redis.TFunctionLoadOptions{Replace: true, Config: `{"last_update_field_name":"last_update"}`}
+ resultAdd, err = client.TFunctionLoadArgs(ctx, libCodeWithConfig("lib1"), opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ })
+ It("should TFunctionList", Label("gears", "tfunctionlist"), func() {
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ resultList, err := client.TFunctionList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultList[0]["engine"]).To(BeEquivalentTo("js"))
+ opt := &redis.TFunctionListOptions{Withcode: true, Verbose: 2}
+ resultListArgs, err := client.TFunctionListArgs(ctx, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultListArgs[0]["code"]).NotTo(BeEquivalentTo(""))
+ })
+
+ It("should TFCall", Label("gears", "tfcall"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ resultAdd, err = client.TFCall(ctx, "lib1", "foo", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+
+ It("should TFCallArgs", Label("gears", "tfcallargs"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ opt := &redis.TFCallOptions{Arguments: []string{"foo", "bar"}}
+ resultAdd, err = client.TFCallArgs(ctx, "lib1", "foo", 0, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+
+ It("should TFCallASYNC", Label("gears", "TFCallASYNC"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ resultAdd, err = client.TFCallASYNC(ctx, "lib1", "foo", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+
+ It("should TFCallASYNCArgs", Label("gears", "TFCallASYNCargs"), func() {
+ var resultAdd interface{}
+ resultAdd, err := client.TFunctionLoad(ctx, libCode("lib1")).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("OK"))
+ opt := &redis.TFCallOptions{Arguments: []string{"foo", "bar"}}
+ resultAdd, err = client.TFCallASYNCArgs(ctx, "lib1", "foo", 0, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo("bar"))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go
new file mode 100644
index 0000000..dc6c3fe
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/generic_commands.go
@@ -0,0 +1,384 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type GenericCmdable interface {
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ ExpireTime(ctx context.Context, key string) *DurationCmd
+ ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectFreq(ctx context.Context, key string) *IntCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PExpireTime(ctx context.Context, key string) *DurationCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+ ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "expire"
+ args[1] = key
+ args[2] = formatSec(ctx, expiration)
+ if mode != "" {
+ args = append(args, mode)
+ }
+
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "expiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectFreq(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "freq", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(command, key string) []interface{} {
+ args := []interface{}{command, key}
+
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
+
+func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args("sort", key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args("sort", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+ args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+ if replace {
+ args = append(args, "REPLACE")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ if keyType != "" {
+ args = append(args, "type", keyType)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go
new file mode 100644
index 0000000..f047b98
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/geo_commands.go
@@ -0,0 +1,155 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type GeoCmdable interface {
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+ GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+ GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+ args := make([]interface{}, 0, 13)
+ args = append(args, "geosearch", key)
+ args = geoSearchArgs(q, args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+ ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+ args := make([]interface{}, 0, 16)
+ args = append(args, "geosearch", key)
+ args = geoSearchLocationArgs(q, args)
+ cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+ args := make([]interface{}, 0, 15)
+ args = append(args, "geosearchstore", store, key)
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+ if q.StoreDist {
+ args = append(args, "storedist")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod
new file mode 100644
index 0000000..6c65f09
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.mod
@@ -0,0 +1,10 @@
+module github.com/redis/go-redis/v9
+
+go 1.18
+
+require (
+ github.com/bsm/ginkgo/v2 v2.12.0
+ github.com/bsm/gomega v1.27.10
+ github.com/cespare/xxhash/v2 v2.2.0
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+)
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum
new file mode 100644
index 0000000..21b4f64
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/go.sum
@@ -0,0 +1,8 @@
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go
new file mode 100644
index 0000000..2c62a75
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hash_commands.go
@@ -0,0 +1,174 @@
+package redis
+
+import "context"
+
+type HashCmdable interface {
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *MapStringStringCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+ HRandField(ctx context.Context, key string, count int) *StringSliceCmd
+ HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd
+}
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} to distinguish between empty string and nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HSet accepts values in following formats:
+//
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+//
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Playing struct With "redis" tag.
+// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` }
+//
+// - HSet("myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0
+//
+// For struct, can be a structure pointer type, we only parse the field whose tag is redis.
+// if you don't want the field to be read, you can use the `redis:"-"` flag to ignore it,
+// or you don't need to set the redis tag.
+// For the type of structure field, we only support simple data types:
+// string, int/uint(8,16,32,64), float(32,64), time.Time(to RFC3339Nano), time.Duration(to Nanoseconds ),
+// if you are other more complex or custom data types, please implement the encoding.BinaryMarshaler interface.
+//
+// Note that in older versions of Redis server(redis-server < 4.0), HSet only supports a single key-value pair.
+// redis-docs: https://redis.io/commands/hset (Starting with Redis version 4.0.0: Accepts multiple field and value arguments.)
+// If you are using a Struct type and the number of fields is greater than one,
+// you will receive an error similar to "ERR wrong number of arguments", you can use HMSet as a substitute.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandField redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hrandfield", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandFieldWithValues redis-server version >= 6.2.0.
+func (c cmdable) HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd {
+ cmd := NewKeyValueSliceCmd(ctx, "hrandfield", key, count, "withvalues")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go
new file mode 100644
index 0000000..5a608fa
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/hyperloglog_commands.go
@@ -0,0 +1,42 @@
+package redis
+
+import "context"
+
+type HyperLogLogCmdable interface {
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+}
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/arg.go
index b97fa0d..2e5ca33 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/arg.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/arg.go
@@ -4,6 +4,8 @@ import (
"fmt"
"strconv"
"time"
+
+ "github.com/redis/go-redis/v9/internal/util"
)
func AppendArg(b []byte, v interface{}) []byte {
@@ -11,7 +13,7 @@ func AppendArg(b []byte, v interface{}) []byte {
case nil:
return append(b, "<nil>"...)
case string:
- return appendUTF8String(b, Bytes(v))
+ return appendUTF8String(b, util.StringToBytes(v))
case []byte:
return appendUTF8String(b, v)
case int:
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag.go
index b3a4f21..f13ee81 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag.go
@@ -3,7 +3,7 @@ package hashtag
import (
"strings"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal/rand"
)
const slotNumber = 16384
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag_test.go
index c0b6396..fe4865b 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hashtag/hashtag_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hashtag/hashtag_test.go
@@ -3,10 +3,10 @@ package hashtag
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal/rand"
)
func TestGinkgoSuite(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan.go
index 852c8bd..203ec4a 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan.go
@@ -10,6 +10,12 @@ import (
// decoderFunc represents decoding functions for default built-in types.
type decoderFunc func(reflect.Value, string) error
+// Scanner is the interface implemented by themselves,
+// which will override the decoding behavior of decoderFunc.
+type Scanner interface {
+ ScanRedis(s string) error
+}
+
var (
// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
decoders = []decoderFunc{
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan_test.go
index ab4c0e1..6c9b303 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/hscan_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/hscan_test.go
@@ -4,9 +4,12 @@ import (
"math"
"strconv"
"testing"
+ "time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9/internal/util"
)
type data struct {
@@ -28,6 +31,21 @@ type data struct {
Float float32 `redis:"float"`
Float64 float64 `redis:"float64"`
Bool bool `redis:"bool"`
+ BoolRef *bool `redis:"boolRef"`
+}
+
+type TimeRFC3339Nano struct {
+ time.Time
+}
+
+func (t *TimeRFC3339Nano) ScanRedis(s string) (err error) {
+ t.Time, err = time.Parse(time.RFC3339Nano, s)
+ return
+}
+
+type TimeData struct {
+ Name string `redis:"name"`
+ Time *TimeRFC3339Nano `redis:"login"`
}
type i []interface{}
@@ -102,10 +120,10 @@ var _ = Describe("Scan", func() {
Expect(Scan(&d, i{"key"}, i{"value"})).NotTo(HaveOccurred())
Expect(d).To(Equal(data{}))
- keys := i{"string", "byte", "int", "int64", "uint", "uint64", "float", "float64", "bool"}
+ keys := i{"string", "byte", "int", "int64", "uint", "uint64", "float", "float64", "bool", "boolRef"}
vals := i{
"str!", "bytes!", "123", "123456789123456789", "456", "987654321987654321",
- "123.456", "123456789123456789.987654321987654321", "1",
+ "123.456", "123456789123456789.987654321987654321", "1", "1",
}
Expect(Scan(&d, keys, vals)).NotTo(HaveOccurred())
Expect(d).To(Equal(data{
@@ -118,6 +136,7 @@ var _ = Describe("Scan", func() {
Float: 123.456,
Float64: 1.2345678912345678e+17,
Bool: true,
+ BoolRef: util.ToPtr(true),
}))
// Scan a different type with the same values to test that
@@ -152,6 +171,7 @@ var _ = Describe("Scan", func() {
Float: 1.0,
Float64: 1.2345678912345678e+17,
Bool: true,
+ BoolRef: util.ToPtr(true),
}))
})
@@ -175,4 +195,26 @@ var _ = Describe("Scan", func() {
Expect(Scan(&d, i{"bool"}, i{""})).To(HaveOccurred())
Expect(Scan(&d, i{"bool"}, i{"123"})).To(HaveOccurred())
})
+
+ It("Implements Scanner", func() {
+ var td TimeData
+
+ now := time.Now()
+ Expect(Scan(&td, i{"name", "login"}, i{"hello", now.Format(time.RFC3339Nano)})).NotTo(HaveOccurred())
+ Expect(td.Name).To(Equal("hello"))
+ Expect(td.Time.UnixNano()).To(Equal(now.UnixNano()))
+ Expect(td.Time.Format(time.RFC3339Nano)).To(Equal(now.Format(time.RFC3339Nano)))
+ })
+
+ It("should time.Time RFC3339Nano", func() {
+ type TimeTime struct {
+ Time time.Time `redis:"time"`
+ }
+
+ now := time.Now()
+
+ var tt TimeTime
+ Expect(Scan(&tt, i{"time"}, i{now.Format(time.RFC3339Nano)})).NotTo(HaveOccurred())
+ Expect(now.Unix()).To(Equal(tt.Time.Unix()))
+ })
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/structmap.go
index 6839412..1a560e4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/hscan/structmap.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/hscan/structmap.go
@@ -1,10 +1,13 @@
package hscan
import (
+ "encoding"
"fmt"
"reflect"
"strings"
"sync"
+
+ "github.com/redis/go-redis/v9/internal/util"
)
// structMap contains the map of struct fields for target structs
@@ -58,7 +61,11 @@ func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
}
// Use the built-in decoder.
- out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+ kind := f.Type.Kind()
+ if kind == reflect.Pointer {
+ kind = f.Type.Elem().Kind()
+ }
+ out.set(tag, &structField{index: i, fn: decoders[kind]})
}
return out
@@ -84,7 +91,32 @@ func (s StructValue) Scan(key string, value string) error {
if !ok {
return nil
}
- if err := field.fn(s.value.Field(field.index), value); err != nil {
+
+ v := s.value.Field(field.index)
+ isPtr := v.Kind() == reflect.Ptr
+
+ if isPtr && v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if !isPtr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ isPtr = true
+ }
+
+ if isPtr && v.Type().NumMethod() > 0 && v.CanInterface() {
+ switch scan := v.Interface().(type) {
+ case Scanner:
+ return scan.ScanRedis(value)
+ case encoding.TextUnmarshaler:
+ return scan.UnmarshalText(util.StringToBytes(value))
+ }
+ }
+
+ if isPtr {
+ v = v.Elem()
+ }
+
+ if err := field.fn(v, value); err != nil {
t := s.value.Type()
return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal.go
index 4a59c59..e783d13 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal.go
@@ -3,7 +3,7 @@ package internal
import (
"time"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal/rand"
)
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal_test.go
index bfdcbbb..00bfd6e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/internal_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/internal_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/gomega"
)
func TestRetryBackoff(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/log.go
index c8b9213..c8b9213 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/log.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/log.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/once.go
index 64f4627..b81244f 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/once.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/once.go
@@ -32,7 +32,9 @@ type Once struct {
// Do calls the function f if and only if Do has not been invoked
// without error for this instance of Once. In other words, given
-// var once Once
+//
+// var once Once
+//
// if once.Do(f) is called multiple times, only the first call will
// invoke f, even if f has a different value in each invocation unless
// f returns an error. A new instance of Once is required for each
@@ -41,7 +43,8 @@ type Once struct {
// Do is intended for initialization that must be run exactly once. Since f
// is niladic, it may be necessary to use a function literal to capture the
// arguments to a function to be invoked by Do:
-// err := config.once.Do(func() error { return config.init(filename) })
+//
+// err := config.once.Do(func() error { return config.init(filename) })
func (o *Once) Do(f func() error) error {
if atomic.LoadUint32(&o.done) == 1 {
return nil
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/bench_test.go
index dec5d3f..71049f4 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/bench_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/bench_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal/pool"
)
type poolGetPutBenchmark struct {
@@ -30,11 +30,10 @@ func BenchmarkPoolGetPut(b *testing.B) {
for _, bm := range benchmarks {
b.Run(bm.String(), func(b *testing.B) {
connPool := pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: bm.poolSize,
- PoolTimeout: time.Second,
- IdleTimeout: time.Hour,
- IdleCheckFrequency: time.Hour,
+ Dialer: dummyDialer,
+ PoolSize: bm.poolSize,
+ PoolTimeout: time.Second,
+ ConnMaxIdleTime: time.Hour,
})
b.ResetTimer()
@@ -74,11 +73,10 @@ func BenchmarkPoolGetRemove(b *testing.B) {
for _, bm := range benchmarks {
b.Run(bm.String(), func(b *testing.B) {
connPool := pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: bm.poolSize,
- PoolTimeout: time.Second,
- IdleTimeout: time.Hour,
- IdleCheckFrequency: time.Hour,
+ Dialer: dummyDialer,
+ PoolSize: bm.poolSize,
+ PoolTimeout: time.Second,
+ ConnMaxIdleTime: time.Hour,
})
b.ResetTimer()
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn.go
index 5661659..7f45bc0 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/conn.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn.go
@@ -7,7 +7,7 @@ import (
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/proto"
)
var noDeadline = time.Time{}
@@ -63,9 +63,13 @@ func (cn *Conn) RemoteAddr() net.Addr {
return nil
}
-func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
- if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
+func (cn *Conn) WithReader(
+ ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error,
+) error {
+ if timeout >= 0 {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
}
return fn(cn.rd)
}
@@ -73,8 +77,10 @@ func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(r
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
- if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
+ if timeout >= 0 {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
}
if cn.bw.Buffered() > 0 {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go
new file mode 100644
index 0000000..83190d3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check.go
@@ -0,0 +1,49 @@
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+
+package pool
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+ "time"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ // Reset previous timeout.
+ _ = conn.SetDeadline(time.Time{})
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
+ var sysErr error
+
+ if err := rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ }); err != nil {
+ return err
+ }
+
+ return sysErr
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go
new file mode 100644
index 0000000..295da12
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_dummy.go
@@ -0,0 +1,9 @@
+//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
+
+package pool
+
+import "net"
+
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go
new file mode 100644
index 0000000..2ade8a0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/conn_check_test.go
@@ -0,0 +1,47 @@
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+
+package pool
+
+import (
+ "net"
+ "net/http/httptest"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+var _ = Describe("tests conn_check with real conns", func() {
+ var ts *httptest.Server
+ var conn net.Conn
+ var err error
+
+ BeforeEach(func() {
+ ts = httptest.NewServer(nil)
+ conn, err = net.DialTimeout(ts.Listener.Addr().Network(), ts.Listener.Addr().String(), time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ ts.Close()
+ })
+
+ It("good conn check", func() {
+ Expect(connCheck(conn)).NotTo(HaveOccurred())
+
+ Expect(conn.Close()).NotTo(HaveOccurred())
+ Expect(connCheck(conn)).To(HaveOccurred())
+ })
+
+ It("bad conn check", func() {
+ Expect(conn.Close()).NotTo(HaveOccurred())
+ Expect(connCheck(conn)).To(HaveOccurred())
+ })
+
+ It("check conn deadline", func() {
+ Expect(conn.SetDeadline(time.Now())).NotTo(HaveOccurred())
+ time.Sleep(time.Millisecond * 10)
+ Expect(connCheck(conn)).NotTo(HaveOccurred())
+ Expect(conn.Close()).NotTo(HaveOccurred())
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/export_test.go
index 75dd4ad..f3a65f8 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/export_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/export_test.go
@@ -1,9 +1,14 @@
package pool
import (
+ "net"
"time"
)
func (cn *Conn) SetCreatedAt(tm time.Time) {
cn.createdAt = tm
}
+
+func (cn *Conn) NetConn() net.Conn {
+ return cn.netConn
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go
new file mode 100644
index 0000000..d993301
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/main_test.go
@@ -0,0 +1,123 @@
+package pool_test
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "pool")
+}
+
+func perform(n int, cbs ...func(int)) {
+ var wg sync.WaitGroup
+ for _, cb := range cbs {
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func(cb func(int), i int) {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ cb(i)
+ }(cb, i)
+ }
+ }
+ wg.Wait()
+}
+
+func dummyDialer(context.Context) (net.Conn, error) {
+ return newDummyConn(), nil
+}
+
+func newDummyConn() net.Conn {
+ return &dummyConn{
+ rawConn: new(dummyRawConn),
+ }
+}
+
+var (
+ _ net.Conn = (*dummyConn)(nil)
+ _ syscall.Conn = (*dummyConn)(nil)
+)
+
+type dummyConn struct {
+ rawConn *dummyRawConn
+}
+
+func (d *dummyConn) SyscallConn() (syscall.RawConn, error) {
+ return d.rawConn, nil
+}
+
+var errDummy = fmt.Errorf("dummyConn err")
+
+func (d *dummyConn) Read(b []byte) (n int, err error) {
+ return 0, errDummy
+}
+
+func (d *dummyConn) Write(b []byte) (n int, err error) {
+ return 0, errDummy
+}
+
+func (d *dummyConn) Close() error {
+ d.rawConn.Close()
+ return nil
+}
+
+func (d *dummyConn) LocalAddr() net.Addr {
+ return &net.TCPAddr{}
+}
+
+func (d *dummyConn) RemoteAddr() net.Addr {
+ return &net.TCPAddr{}
+}
+
+func (d *dummyConn) SetDeadline(t time.Time) error {
+ return nil
+}
+
+func (d *dummyConn) SetReadDeadline(t time.Time) error {
+ return nil
+}
+
+func (d *dummyConn) SetWriteDeadline(t time.Time) error {
+ return nil
+}
+
+var _ syscall.RawConn = (*dummyRawConn)(nil)
+
+type dummyRawConn struct {
+ mu sync.Mutex
+ closed bool
+}
+
+func (d *dummyRawConn) Control(f func(fd uintptr)) error {
+ return nil
+}
+
+func (d *dummyRawConn) Read(f func(fd uintptr) (done bool)) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ if d.closed {
+ return fmt.Errorf("dummyRawConn closed")
+ }
+ return nil
+}
+
+func (d *dummyRawConn) Write(f func(fd uintptr) (done bool)) error {
+ return nil
+}
+
+func (d *dummyRawConn) Close() {
+ d.mu.Lock()
+ d.closed = true
+ d.mu.Unlock()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool.go
index 44a4e77..2125f3e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool.go
@@ -8,13 +8,17 @@ import (
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal"
+ "github.com/redis/go-redis/v9/internal"
)
var (
// ErrClosed performs any operation on the closed client will return this error.
ErrClosed = errors.New("redis: client is closed")
+ // ErrPoolExhausted is returned from a pool connection method
+ // when the maximum number of database connections in the pool has been reached.
+ ErrPoolExhausted = errors.New("redis: connection pool exhausted")
+
// ErrPoolTimeout timed out waiting to get a connection from the connection pool.
ErrPoolTimeout = errors.New("redis: connection pool timeout")
)
@@ -54,16 +58,16 @@ type Pooler interface {
}
type Options struct {
- Dialer func(context.Context) (net.Conn, error)
- OnClose func(*Conn) error
+ Dialer func(context.Context) (net.Conn, error)
- PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolFIFO bool
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
}
type lastDialErrorWrap struct {
@@ -71,66 +75,67 @@ type lastDialErrorWrap struct {
}
type ConnPool struct {
- opt *Options
+ cfg *Options
dialErrorsNum uint32 // atomic
-
lastDialError atomic.Value
queue chan struct{}
- connsMu sync.Mutex
- conns []*Conn
- idleConns []*Conn
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+
poolSize int
idleConnsLen int
stats Stats
- _closed uint32 // atomic
- closedCh chan struct{}
+ _closed uint32 // atomic
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(opt *Options) *ConnPool {
p := &ConnPool{
- opt: opt,
+ cfg: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
idleConns: make([]*Conn, 0, opt.PoolSize),
- closedCh: make(chan struct{}),
}
p.connsMu.Lock()
p.checkMinIdleConns()
p.connsMu.Unlock()
- if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
- go p.reaper(opt.IdleCheckFrequency)
- }
-
return p
}
func (p *ConnPool) checkMinIdleConns() {
- if p.opt.MinIdleConns == 0 {
+ if p.cfg.MinIdleConns == 0 {
return
}
- for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
- p.poolSize++
- p.idleConnsLen++
-
- go func() {
- err := p.addIdleConn()
- if err != nil && err != ErrClosed {
- p.connsMu.Lock()
- p.poolSize--
- p.idleConnsLen--
- p.connsMu.Unlock()
- }
- }()
+ for p.poolSize < p.cfg.PoolSize && p.idleConnsLen < p.cfg.MinIdleConns {
+ select {
+ case p.queue <- struct{}{}:
+ p.poolSize++
+ p.idleConnsLen++
+
+ go func() {
+ err := p.addIdleConn()
+ if err != nil && err != ErrClosed {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+
+ p.freeTurn()
+ }()
+ default:
+ return
+ }
}
}
@@ -159,6 +164,17 @@ func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
}
func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ p.connsMu.Lock()
+ if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns {
+ p.connsMu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+ p.connsMu.Unlock()
+
cn, err := p.dialConn(ctx, pooled)
if err != nil {
return nil, err
@@ -167,16 +183,15 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
p.connsMu.Lock()
defer p.connsMu.Unlock()
- // It is not allowed to add new connections to the closed connection pool.
- if p.closed() {
+ if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns {
_ = cn.Close()
- return nil, ErrClosed
+ return nil, ErrPoolExhausted
}
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
- if p.poolSize >= p.opt.PoolSize {
+ if p.poolSize >= p.cfg.PoolSize {
cn.pooled = false
} else {
p.poolSize++
@@ -191,14 +206,14 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
return nil, ErrClosed
}
- if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.cfg.PoolSize) {
return nil, p.getLastDialError()
}
- netConn, err := p.opt.Dialer(ctx)
+ netConn, err := p.cfg.Dialer(ctx)
if err != nil {
p.setLastDialError(err)
- if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) {
go p.tryDial()
}
return nil, err
@@ -215,7 +230,7 @@ func (p *ConnPool) tryDial() {
return
}
- conn, err := p.opt.Dialer(context.Background())
+ conn, err := p.cfg.Dialer(context.Background())
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
@@ -256,6 +271,7 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
p.connsMu.Unlock()
if err != nil {
+ p.freeTurn()
return nil, err
}
@@ -263,7 +279,7 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
break
}
- if p.isStaleConn(cn) {
+ if !p.isHealthyConn(cn) {
_ = p.CloseConn(cn)
continue
}
@@ -283,10 +299,6 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
return newcn, nil
}
-func (p *ConnPool) getTurn() {
- p.queue <- struct{}{}
-}
-
func (p *ConnPool) waitTurn(ctx context.Context) error {
select {
case <-ctx.Done():
@@ -301,7 +313,7 @@ func (p *ConnPool) waitTurn(ctx context.Context) error {
}
timer := timers.Get().(*time.Timer)
- timer.Reset(p.opt.PoolTimeout)
+ timer.Reset(p.cfg.PoolTimeout)
select {
case <-ctx.Done():
@@ -337,7 +349,7 @@ func (p *ConnPool) popIdle() (*Conn, error) {
}
var cn *Conn
- if p.opt.PoolFIFO {
+ if p.cfg.PoolFIFO {
cn = p.idleConns[0]
copy(p.idleConns, p.idleConns[1:])
p.idleConns = p.idleConns[:n-1]
@@ -363,14 +375,28 @@ func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
return
}
+ var shouldCloseConn bool
+
p.connsMu.Lock()
- p.idleConns = append(p.idleConns, cn)
- p.idleConnsLen++
+
+ if p.cfg.MaxIdleConns == 0 || p.idleConnsLen < p.cfg.MaxIdleConns {
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ } else {
+ p.removeConn(cn)
+ shouldCloseConn = true
+ }
+
p.connsMu.Unlock()
+
p.freeTurn()
+
+ if shouldCloseConn {
+ _ = p.closeConn(cn)
+ }
}
-func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+func (p *ConnPool) Remove(_ context.Context, cn *Conn, reason error) {
p.removeConnWithLock(cn)
p.freeTurn()
_ = p.closeConn(cn)
@@ -383,8 +409,8 @@ func (p *ConnPool) CloseConn(cn *Conn) error {
func (p *ConnPool) removeConnWithLock(cn *Conn) {
p.connsMu.Lock()
+ defer p.connsMu.Unlock()
p.removeConn(cn)
- p.connsMu.Unlock()
}
func (p *ConnPool) removeConn(cn *Conn) {
@@ -395,15 +421,13 @@ func (p *ConnPool) removeConn(cn *Conn) {
p.poolSize--
p.checkMinIdleConns()
}
- return
+ break
}
}
+ atomic.AddUint32(&p.stats.StaleConns, 1)
}
func (p *ConnPool) closeConn(cn *Conn) error {
- if p.opt.OnClose != nil {
- _ = p.opt.OnClose(cn)
- }
return cn.Close()
}
@@ -424,14 +448,13 @@ func (p *ConnPool) IdleLen() int {
}
func (p *ConnPool) Stats() *Stats {
- idleLen := p.IdleLen()
return &Stats{
Hits: atomic.LoadUint32(&p.stats.Hits),
Misses: atomic.LoadUint32(&p.stats.Misses),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
- IdleConns: uint32(idleLen),
+ IdleConns: uint32(p.IdleLen()),
StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
}
@@ -459,7 +482,6 @@ func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
- close(p.closedCh)
var firstErr error
p.connsMu.Lock()
@@ -477,81 +499,20 @@ func (p *ConnPool) Close() error {
return firstErr
}
-func (p *ConnPool) reaper(frequency time.Duration) {
- ticker := time.NewTicker(frequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- // It is possible that ticker and closedCh arrive together,
- // and select pseudo-randomly pick ticker case, we double
- // check here to prevent being executed after closed.
- if p.closed() {
- return
- }
- _, err := p.ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
- continue
- }
- case <-p.closedCh:
- return
- }
- }
-}
-
-func (p *ConnPool) ReapStaleConns() (int, error) {
- var n int
- for {
- p.getTurn()
-
- p.connsMu.Lock()
- cn := p.reapStaleConn()
- p.connsMu.Unlock()
-
- p.freeTurn()
-
- if cn != nil {
- _ = p.closeConn(cn)
- n++
- } else {
- break
- }
- }
- atomic.AddUint32(&p.stats.StaleConns, uint32(n))
- return n, nil
-}
-
-func (p *ConnPool) reapStaleConn() *Conn {
- if len(p.idleConns) == 0 {
- return nil
- }
+func (p *ConnPool) isHealthyConn(cn *Conn) bool {
+ now := time.Now()
- cn := p.idleConns[0]
- if !p.isStaleConn(cn) {
- return nil
+ if p.cfg.ConnMaxLifetime > 0 && now.Sub(cn.createdAt) >= p.cfg.ConnMaxLifetime {
+ return false
}
-
- p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
- p.idleConnsLen--
- p.removeConn(cn)
-
- return cn
-}
-
-func (p *ConnPool) isStaleConn(cn *Conn) bool {
- if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ if p.cfg.ConnMaxIdleTime > 0 && now.Sub(cn.UsedAt()) >= p.cfg.ConnMaxIdleTime {
return false
}
- now := time.Now()
- if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
- return true
- }
- if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
- return true
+ if connCheck(cn.netConn) != nil {
+ return false
}
- return false
+ cn.SetUsedAt(now)
+ return true
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_single.go
index 5a3fde1..5a3fde1 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_single.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_single.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_sticky.go
index 3adb99b..3adb99b 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_sticky.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_sticky.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_test.go
index 423a783..76dec99 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/pool/pool_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/pool/pool_test.go
@@ -7,10 +7,10 @@ import (
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal/pool"
)
var _ = Describe("ConnPool", func() {
@@ -19,11 +19,10 @@ var _ = Describe("ConnPool", func() {
BeforeEach(func() {
connPool = pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: 10,
- PoolTimeout: time.Hour,
- IdleTimeout: time.Millisecond,
- IdleCheckFrequency: time.Millisecond,
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ ConnMaxIdleTime: time.Millisecond,
})
})
@@ -45,11 +44,10 @@ var _ = Describe("ConnPool", func() {
<-closedChan
return &net.TCPConn{}, nil
},
- PoolSize: 10,
- PoolTimeout: time.Hour,
- IdleTimeout: time.Millisecond,
- IdleCheckFrequency: time.Millisecond,
- MinIdleConns: minIdleConns,
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ ConnMaxIdleTime: time.Millisecond,
+ MinIdleConns: minIdleConns,
})
wg.Wait()
Expect(connPool.Close()).NotTo(HaveOccurred())
@@ -127,12 +125,11 @@ var _ = Describe("MinIdleConns", func() {
newConnPool := func() *pool.ConnPool {
connPool := pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: poolSize,
- MinIdleConns: minIdleConns,
- PoolTimeout: 100 * time.Millisecond,
- IdleTimeout: -1,
- IdleCheckFrequency: -1,
+ Dialer: dummyDialer,
+ PoolSize: poolSize,
+ MinIdleConns: minIdleConns,
+ PoolTimeout: 100 * time.Millisecond,
+ ConnMaxIdleTime: -1,
})
Eventually(func() int {
return connPool.Len()
@@ -287,130 +284,6 @@ var _ = Describe("MinIdleConns", func() {
})
})
-var _ = Describe("conns reaper", func() {
- const idleTimeout = time.Minute
- const maxAge = time.Hour
-
- ctx := context.Background()
- var connPool *pool.ConnPool
- var conns, staleConns, closedConns []*pool.Conn
-
- assert := func(typ string) {
- BeforeEach(func() {
- closedConns = nil
- connPool = pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: 10,
- IdleTimeout: idleTimeout,
- MaxConnAge: maxAge,
- PoolTimeout: time.Second,
- IdleCheckFrequency: time.Hour,
- OnClose: func(cn *pool.Conn) error {
- closedConns = append(closedConns, cn)
- return nil
- },
- })
-
- conns = nil
-
- // add stale connections
- staleConns = nil
- for i := 0; i < 3; i++ {
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- switch typ {
- case "idle":
- cn.SetUsedAt(time.Now().Add(-2 * idleTimeout))
- case "aged":
- cn.SetCreatedAt(time.Now().Add(-2 * maxAge))
- }
- conns = append(conns, cn)
- staleConns = append(staleConns, cn)
- }
-
- // add fresh connections
- for i := 0; i < 3; i++ {
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- conns = append(conns, cn)
- }
-
- for _, cn := range conns {
- connPool.Put(ctx, cn)
- }
-
- Expect(connPool.Len()).To(Equal(6))
- Expect(connPool.IdleLen()).To(Equal(6))
-
- n, err := connPool.ReapStaleConns()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(3))
- })
-
- AfterEach(func() {
- _ = connPool.Close()
- Expect(connPool.Len()).To(Equal(0))
- Expect(connPool.IdleLen()).To(Equal(0))
- Expect(len(closedConns)).To(Equal(len(conns)))
- Expect(closedConns).To(ConsistOf(conns))
- })
-
- It("reaps stale connections", func() {
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(3))
- })
-
- It("does not reap fresh connections", func() {
- n, err := connPool.ReapStaleConns()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(0))
- })
-
- It("stale connections are closed", func() {
- Expect(len(closedConns)).To(Equal(len(staleConns)))
- Expect(closedConns).To(ConsistOf(staleConns))
- })
-
- It("pool is functional", func() {
- for j := 0; j < 3; j++ {
- var freeCns []*pool.Conn
- for i := 0; i < 3; i++ {
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cn).NotTo(BeNil())
- freeCns = append(freeCns, cn)
- }
-
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(0))
-
- cn, err := connPool.Get(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cn).NotTo(BeNil())
- conns = append(conns, cn)
-
- Expect(connPool.Len()).To(Equal(4))
- Expect(connPool.IdleLen()).To(Equal(0))
-
- connPool.Remove(ctx, cn, nil)
-
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(0))
-
- for _, cn := range freeCns {
- connPool.Put(ctx, cn)
- }
-
- Expect(connPool.Len()).To(Equal(3))
- Expect(connPool.IdleLen()).To(Equal(3))
- }
- })
- }
-
- assert("idle")
- assert("aged")
-})
-
var _ = Describe("race", func() {
ctx := context.Background()
var connPool *pool.ConnPool
@@ -430,11 +303,10 @@ var _ = Describe("race", func() {
It("does not happen on Get, Put, and Remove", func() {
connPool = pool.NewConnPool(&pool.Options{
- Dialer: dummyDialer,
- PoolSize: 10,
- PoolTimeout: time.Minute,
- IdleTimeout: time.Millisecond,
- IdleCheckFrequency: time.Millisecond,
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Minute,
+ ConnMaxIdleTime: time.Millisecond,
})
perform(C, func(id int) {
@@ -455,4 +327,30 @@ var _ = Describe("race", func() {
}
})
})
+
+ It("limit the number of connections", func() {
+ opt := &pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return &net.TCPConn{}, nil
+ },
+ PoolSize: 1000,
+ MinIdleConns: 50,
+ PoolTimeout: 3 * time.Second,
+ }
+ p := pool.NewConnPool(opt)
+
+ var wg sync.WaitGroup
+ for i := 0; i < opt.PoolSize; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, _ = p.Get(ctx)
+ }()
+ }
+ wg.Wait()
+
+ stats := p.Stats()
+ Expect(stats.IdleConns).To(Equal(uint32(0)))
+ Expect(stats.TotalConns).To(Equal(uint32(opt.PoolSize)))
+ })
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/proto_test.go
index c9a820e..87818ca 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/proto_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/proto_test.go
@@ -3,8 +3,8 @@ package proto_test
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
)
func TestGinkgoSuite(t *testing.T) {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go
new file mode 100644
index 0000000..8d23817
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader.go
@@ -0,0 +1,552 @@
+package proto
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// redis resp protocol data type.
+const (
+ RespStatus = '+' // +<string>\r\n
+ RespError = '-' // -<string>\r\n
+ RespString = '$' // $<length>\r\n<bytes>\r\n
+ RespInt = ':' // :<number>\r\n
+ RespNil = '_' // _\r\n
+ RespFloat = ',' // ,<floating-point-number>\r\n (golang float)
+ RespBool = '#' // true: #t\r\n false: #f\r\n
+ RespBlobError = '!' // !<length>\r\n<bytes>\r\n
+ RespVerbatim = '=' // =<length>\r\nFORMAT:<bytes>\r\n
+ RespBigInt = '(' // (<big number>\r\n
+ RespArray = '*' // *<len>\r\n... (same as resp2)
+ RespMap = '%' // %<len>\r\n(key)\r\n(value)\r\n... (golang map)
+ RespSet = '~' // ~<len>\r\n... (same as Array)
+ RespAttr = '|' // |<len>\r\n(key)\r\n(value)\r\n... + command reply
+ RespPush = '>' // ><len>\r\n... (same as Array)
+)
+
+// Not used temporarily.
+// Redis has not used these two data types for the time being, and will implement them later.
+// Streamed = "EOF:"
+// StreamedAggregated = '?'
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil") // nolint:errname
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(line[1:])
+}
+
+//------------------------------------------------------------------------------
+
+type Reader struct {
+ rd *bufio.Reader
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+// PeekReplyType returns the data type of the next response without advancing the Reader,
+// and discard the attribute type.
+func (r *Reader) PeekReplyType() (byte, error) {
+ b, err := r.rd.Peek(1)
+ if err != nil {
+ return 0, err
+ }
+ if b[0] == RespAttr {
+ if err = r.DiscardNext(); err != nil {
+ return 0, err
+ }
+ return r.PeekReplyType()
+ }
+ return b[0], nil
+}
+
+// ReadLine Return a valid reply, it will check the protocol or redis error,
+// and discard the attribute type.
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case RespError:
+ return nil, ParseErrorReply(line)
+ case RespNil:
+ return nil, Nil
+ case RespBlobError:
+ var blobErr string
+ blobErr, err = r.readStringReply(line)
+ if err == nil {
+ err = RedisError(blobErr)
+ }
+ return nil, err
+ case RespAttr:
+ if err = r.Discard(line); err != nil {
+ return nil, err
+ }
+ return r.ReadLine()
+ }
+
+ // Compatible with RESP2
+ if IsNilReply(line) {
+ return nil, Nil
+ }
+
+ return line, nil
+}
+
+// readLine returns an error if:
+// - there is a pending read error;
+// - or line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...) //nolint:makezero
+ b = full
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply() (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case RespStatus:
+ return string(line[1:]), nil
+ case RespInt:
+ return util.ParseInt(line[1:], 10, 64)
+ case RespFloat:
+ return r.readFloat(line)
+ case RespBool:
+ return r.readBool(line)
+ case RespBigInt:
+ return r.readBigInt(line)
+
+ case RespString:
+ return r.readStringReply(line)
+ case RespVerbatim:
+ return r.readVerb(line)
+
+ case RespArray, RespSet, RespPush:
+ return r.readSlice(line)
+ case RespMap:
+ return r.readMap(line)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) readFloat(line []byte) (float64, error) {
+ v := string(line[1:])
+ switch string(line[1:]) {
+ case "inf":
+ return math.Inf(1), nil
+ case "-inf":
+ return math.Inf(-1), nil
+ case "nan", "-nan":
+ return math.NaN(), nil
+ }
+ return strconv.ParseFloat(v, 64)
+}
+
+func (r *Reader) readBool(line []byte) (bool, error) {
+ switch string(line[1:]) {
+ case "t":
+ return true, nil
+ case "f":
+ return false, nil
+ }
+ return false, fmt.Errorf("redis: can't parse bool reply: %q", line)
+}
+
+func (r *Reader) readBigInt(line []byte) (*big.Int, error) {
+ i := new(big.Int)
+ if i, ok := i.SetString(string(line[1:]), 10); ok {
+ return i, nil
+ }
+ return nil, fmt.Errorf("redis: can't parse bigInt reply: %q", line)
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, n+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:n]), nil
+}
+
+func (r *Reader) readVerb(line []byte) (string, error) {
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return "", err
+ }
+ if len(s) < 4 || s[3] != ':' {
+ return "", fmt.Errorf("redis: can't parse verbatim string reply: %q", line)
+ }
+ return s[4:], nil
+}
+
+func (r *Reader) readSlice(line []byte) ([]interface{}, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return nil, err
+ }
+
+ val := make([]interface{}, n)
+ for i := 0; i < len(val); i++ {
+ v, err := r.ReadReply()
+ if err != nil {
+ if err == Nil {
+ val[i] = nil
+ continue
+ }
+ if err, ok := err.(RedisError); ok {
+ val[i] = err
+ continue
+ }
+ return nil, err
+ }
+ val[i] = v
+ }
+ return val, nil
+}
+
+func (r *Reader) readMap(line []byte) (map[interface{}]interface{}, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[interface{}]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := r.ReadReply()
+ if err != nil {
+ return nil, err
+ }
+ v, err := r.ReadReply()
+ if err != nil {
+ if err == Nil {
+ m[k] = nil
+ continue
+ }
+ if err, ok := err.(RedisError); ok {
+ m[k] = err
+ continue
+ }
+ return nil, err
+ }
+ m[k] = v
+ }
+ return m, nil
+}
+
+// -------------------------------
+
+func (r *Reader) ReadInt() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespInt, RespStatus:
+ return util.ParseInt(line[1:], 10, 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt([]byte(s), 10, 64)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return 0, err
+ }
+ if !b.IsInt64() {
+ return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
+ }
+ return b.Int64(), nil
+ }
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespInt, RespStatus:
+ return util.ParseUint(line[1:], 10, 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint([]byte(s), 10, 64)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return 0, err
+ }
+ if !b.IsUint64() {
+ return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
+ }
+ return b.Uint64(), nil
+ }
+ return 0, fmt.Errorf("redis: can't parse uint reply: %.100q", line)
+}
+
+func (r *Reader) ReadFloat() (float64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespFloat:
+ return r.readFloat(line)
+ case RespStatus:
+ return strconv.ParseFloat(string(line[1:]), 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseFloat(s, 64)
+ }
+ return 0, fmt.Errorf("redis: can't parse float reply: %.100q", line)
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+
+ switch line[0] {
+ case RespStatus, RespInt, RespFloat:
+ return string(line[1:]), nil
+ case RespString:
+ return r.readStringReply(line)
+ case RespBool:
+ b, err := r.readBool(line)
+ return strconv.FormatBool(b), err
+ case RespVerbatim:
+ return r.readVerb(line)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return "", err
+ }
+ return b.String(), nil
+ }
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+}
+
+func (r *Reader) ReadBool() (bool, error) {
+ s, err := r.ReadString()
+ if err != nil {
+ return false, err
+ }
+ return s == "OK" || s == "1" || s == "true", nil
+}
+
+func (r *Reader) ReadSlice() ([]interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ return r.readSlice(line)
+}
+
+// ReadFixedArrayLen read fixed array length.
+func (r *Reader) ReadFixedArrayLen(fixedLen int) error {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n != fixedLen {
+ return fmt.Errorf("redis: got %d elements in the array, wanted %d", n, fixedLen)
+ }
+ return nil
+}
+
+// ReadArrayLen Read and return the length of the array.
+func (r *Reader) ReadArrayLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespArray, RespSet, RespPush:
+ return replyLen(line)
+ default:
+ return 0, fmt.Errorf("redis: can't parse array/set/push reply: %.100q", line)
+ }
+}
+
+// ReadFixedMapLen reads fixed map length.
+func (r *Reader) ReadFixedMapLen(fixedLen int) error {
+ n, err := r.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ if n != fixedLen {
+ return fmt.Errorf("redis: got %d elements in the map, wanted %d", n, fixedLen)
+ }
+ return nil
+}
+
+// ReadMapLen reads the length of the map type.
+// If responding to the array type (RespArray/RespSet/RespPush),
+// it must be a multiple of 2 and return n/2.
+// Other types will return an error.
+func (r *Reader) ReadMapLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespMap:
+ return replyLen(line)
+ case RespArray, RespSet, RespPush:
+ // Some commands and RESP2 protocol may respond to array types.
+ n, err := replyLen(line)
+ if err != nil {
+ return 0, err
+ }
+ if n%2 != 0 {
+ return 0, fmt.Errorf("redis: the length of the array must be a multiple of 2, got: %d", n)
+ }
+ return n / 2, nil
+ default:
+ return 0, fmt.Errorf("redis: can't parse map reply: %.100q", line)
+ }
+}
+
+// DiscardNext read and discard the data represented by the next line.
+func (r *Reader) DiscardNext() error {
+ line, err := r.readLine()
+ if err != nil {
+ return err
+ }
+ return r.Discard(line)
+}
+
+// Discard the data represented by line.
+func (r *Reader) Discard(line []byte) (err error) {
+ if len(line) == 0 {
+ return errors.New("redis: invalid line")
+ }
+ switch line[0] {
+ case RespStatus, RespError, RespInt, RespNil, RespFloat, RespBool, RespBigInt:
+ return nil
+ }
+
+ n, err := replyLen(line)
+ if err != nil && err != Nil {
+ return err
+ }
+
+ switch line[0] {
+ case RespBlobError, RespString, RespVerbatim:
+ // +\r\n
+ _, err = r.rd.Discard(n + 2)
+ return err
+ case RespArray, RespSet, RespPush:
+ for i := 0; i < n; i++ {
+ if err = r.DiscardNext(); err != nil {
+ return err
+ }
+ }
+ return nil
+ case RespMap, RespAttr:
+ // Read key & value.
+ for i := 0; i < n*2; i++ {
+ if err = r.DiscardNext(); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ return fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func replyLen(line []byte) (n int, err error) {
+ n, err = util.Atoi(line[1:])
+ if err != nil {
+ return 0, err
+ }
+
+ if n < -1 {
+ return 0, fmt.Errorf("redis: invalid reply: %q", line)
+ }
+
+ switch line[0] {
+ case RespString, RespVerbatim, RespBlobError,
+ RespArray, RespSet, RespPush, RespMap, RespAttr:
+ if n == -1 {
+ return 0, Nil
+ }
+ }
+ return n, nil
+}
+
+// IsNilReply detects redis.Nil of RESP2.
+func IsNilReply(line []byte) bool {
+ return len(line) == 3 &&
+ (line[0] == RespString || line[0] == RespArray) &&
+ line[1] == '-' && line[2] == '1'
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go
new file mode 100644
index 0000000..2d5f56c
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/reader_test.go
@@ -0,0 +1,100 @@
+package proto_test
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+func BenchmarkReader_ParseReply_Status(b *testing.B) {
+ benchmarkParseReply(b, "+OK\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Int(b *testing.B) {
+ benchmarkParseReply(b, ":1\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Float(b *testing.B) {
+ benchmarkParseReply(b, ",123.456\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Bool(b *testing.B) {
+ benchmarkParseReply(b, "#t\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_BigInt(b *testing.B) {
+ benchmarkParseReply(b, "(3492890328409238509324850943850943825024385\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Error(b *testing.B) {
+ benchmarkParseReply(b, "-Error message\r\n", true)
+}
+
+func BenchmarkReader_ParseReply_Nil(b *testing.B) {
+ benchmarkParseReply(b, "_\r\n", true)
+}
+
+func BenchmarkReader_ParseReply_BlobError(b *testing.B) {
+ benchmarkParseReply(b, "!21\r\nSYNTAX invalid syntax", true)
+}
+
+func BenchmarkReader_ParseReply_String(b *testing.B) {
+ benchmarkParseReply(b, "$5\r\nhello\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Verb(b *testing.B) {
+ benchmarkParseReply(b, "$9\r\ntxt:hello\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Slice(b *testing.B) {
+ benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Set(b *testing.B) {
+ benchmarkParseReply(b, "~2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Push(b *testing.B) {
+ benchmarkParseReply(b, ">2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Map(b *testing.B) {
+ benchmarkParseReply(b, "%2\r\n$5\r\nhello\r\n$5\r\nworld\r\n+key\r\n+value\r\n", false)
+}
+
+func BenchmarkReader_ParseReply_Attr(b *testing.B) {
+ benchmarkParseReply(b, "%1\r\n+key\r\n+value\r\n+hello\r\n", false)
+}
+
+func TestReader_ReadLine(t *testing.T) {
+ original := bytes.Repeat([]byte("a"), 8192)
+ original[len(original)-2] = '\r'
+ original[len(original)-1] = '\n'
+ r := proto.NewReader(bytes.NewReader(original))
+ read, err := r.ReadLine()
+ if err != nil && err != io.EOF {
+ t.Errorf("Should be able to read the full buffer: %v", err)
+ }
+
+ if !bytes.Equal(read, original[:len(original)-2]) {
+ t.Errorf("Values must be equal: %d expected %d", len(read), len(original[:len(original)-2]))
+ }
+}
+
+func benchmarkParseReply(b *testing.B, reply string, wanterr bool) {
+ buf := new(bytes.Buffer)
+ for i := 0; i < b.N; i++ {
+ buf.WriteString(reply)
+ }
+ p := proto.NewReader(buf)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := p.ReadReply()
+ if !wanterr && err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan.go
index 0e99476..5223069 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan.go
@@ -3,13 +3,15 @@ package proto
import (
"encoding"
"fmt"
+ "net"
"reflect"
"time"
- "github.com/go-redis/redis/v8/internal/util"
+ "github.com/redis/go-redis/v9/internal/util"
)
// Scan parses bytes `b` to `v` with appropriate type.
+//
//nolint:gocyclo
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
@@ -115,6 +117,9 @@ func Scan(b []byte, v interface{}) error {
return nil
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
+ case *net.IP:
+ *v = b
+ return nil
default:
return fmt.Errorf(
"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan_test.go
index 55df550..e0cd2f7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/scan_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/scan_test.go
@@ -3,10 +3,10 @@ package proto_test
import (
"encoding/json"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/proto"
)
type testScanSliceStruct struct {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer.go
index c426098..78595cc 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/proto/writer.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer.go
@@ -4,16 +4,17 @@ import (
"encoding"
"fmt"
"io"
+ "net"
"strconv"
"time"
- "github.com/go-redis/redis/v8/internal/util"
+ "github.com/redis/go-redis/v9/internal/util"
)
type writer interface {
io.Writer
io.ByteWriter
- // io.StringWriter
+ // WriteString implement io.StringWriter.
WriteString(s string) (n int, err error)
}
@@ -34,7 +35,7 @@ func NewWriter(wr writer) *Writer {
}
func (w *Writer) WriteArgs(args []interface{}) error {
- if err := w.WriteByte(ArrayReply); err != nil {
+ if err := w.WriteByte(RespArray); err != nil {
return err
}
@@ -64,37 +65,68 @@ func (w *Writer) WriteArg(v interface{}) error {
return w.string("")
case string:
return w.string(v)
+ case *string:
+ return w.string(*v)
case []byte:
return w.bytes(v)
case int:
return w.int(int64(v))
+ case *int:
+ return w.int(int64(*v))
case int8:
return w.int(int64(v))
+ case *int8:
+ return w.int(int64(*v))
case int16:
return w.int(int64(v))
+ case *int16:
+ return w.int(int64(*v))
case int32:
return w.int(int64(v))
+ case *int32:
+ return w.int(int64(*v))
case int64:
return w.int(v)
+ case *int64:
+ return w.int(*v)
case uint:
return w.uint(uint64(v))
+ case *uint:
+ return w.uint(uint64(*v))
case uint8:
return w.uint(uint64(v))
+ case *uint8:
+ return w.uint(uint64(*v))
case uint16:
return w.uint(uint64(v))
+ case *uint16:
+ return w.uint(uint64(*v))
case uint32:
return w.uint(uint64(v))
+ case *uint32:
+ return w.uint(uint64(*v))
case uint64:
return w.uint(v)
+ case *uint64:
+ return w.uint(*v)
case float32:
return w.float(float64(v))
+ case *float32:
+ return w.float(float64(*v))
case float64:
return w.float(v)
+ case *float64:
+ return w.float(*v)
case bool:
if v {
return w.int(1)
}
return w.int(0)
+ case *bool:
+ if *v {
+ return w.int(1)
+ }
+ return w.int(0)
case time.Time:
w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
return w.bytes(w.numBuf)
@@ -106,6 +138,8 @@ func (w *Writer) WriteArg(v interface{}) error {
return err
}
return w.bytes(b)
+ case net.IP:
+ return w.bytes(v)
default:
return fmt.Errorf(
"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
@@ -113,7 +147,7 @@ func (w *Writer) WriteArg(v interface{}) error {
}
func (w *Writer) bytes(b []byte) error {
- if err := w.WriteByte(StringReply); err != nil {
+ if err := w.WriteByte(RespString); err != nil {
return err
}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go
new file mode 100644
index 0000000..7c9d208
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/proto/writer_test.go
@@ -0,0 +1,154 @@
+package proto_test
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type MyType struct{}
+
+var _ encoding.BinaryMarshaler = (*MyType)(nil)
+
+func (t *MyType) MarshalBinary() ([]byte, error) {
+ return []byte("hello"), nil
+}
+
+var _ = Describe("WriteBuffer", func() {
+ var buf *bytes.Buffer
+ var wr *proto.Writer
+
+ BeforeEach(func() {
+ buf = new(bytes.Buffer)
+ wr = proto.NewWriter(buf)
+ })
+
+ It("should write args", func() {
+ err := wr.WriteArgs([]interface{}{
+ "string",
+ 12,
+ 34.56,
+ []byte{'b', 'y', 't', 'e', 's'},
+ true,
+ nil,
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
+ "$6\r\nstring\r\n" +
+ "$2\r\n12\r\n" +
+ "$5\r\n34.56\r\n" +
+ "$5\r\nbytes\r\n" +
+ "$1\r\n1\r\n" +
+ "$0\r\n" +
+ "\r\n")))
+ })
+
+ It("should append time", func() {
+ tm := time.Date(2019, 1, 1, 9, 45, 10, 222125, time.UTC)
+ err := wr.WriteArgs([]interface{}{tm})
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Len()).To(Equal(41))
+ })
+
+ It("should append marshalable args", func() {
+ err := wr.WriteArgs([]interface{}{&MyType{}})
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(buf.Len()).To(Equal(15))
+ })
+
+ It("should append net.IP", func() {
+ ip := net.ParseIP("192.168.1.1")
+ err := wr.WriteArgs([]interface{}{ip})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(buf.String()).To(Equal(fmt.Sprintf("*1\r\n$16\r\n%s\r\n", bytes.NewBuffer(ip))))
+ })
+})
+
+type discard struct{}
+
+func (discard) Write(b []byte) (int, error) {
+ return len(b), nil
+}
+
+func (discard) WriteString(s string) (int, error) {
+ return len(s), nil
+}
+
+func (discard) WriteByte(c byte) error {
+ return nil
+}
+
+func BenchmarkWriteBuffer_Append(b *testing.B) {
+ buf := proto.NewWriter(discard{})
+ args := []interface{}{"hello", "world", "foo", "bar"}
+
+ for i := 0; i < b.N; i++ {
+ err := buf.WriteArgs(args)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+var _ = Describe("WriteArg", func() {
+ var buf *bytes.Buffer
+ var wr *proto.Writer
+
+ BeforeEach(func() {
+ buf = new(bytes.Buffer)
+ wr = proto.NewWriter(buf)
+ })
+
+ args := map[any]string{
+ "hello": "$5\r\nhello\r\n",
+ int(10): "$2\r\n10\r\n",
+ util.ToPtr(int(10)): "$2\r\n10\r\n",
+ int8(10): "$2\r\n10\r\n",
+ util.ToPtr(int8(10)): "$2\r\n10\r\n",
+ int16(10): "$2\r\n10\r\n",
+ util.ToPtr(int16(10)): "$2\r\n10\r\n",
+ int32(10): "$2\r\n10\r\n",
+ util.ToPtr(int32(10)): "$2\r\n10\r\n",
+ int64(10): "$2\r\n10\r\n",
+ util.ToPtr(int64(10)): "$2\r\n10\r\n",
+ uint(10): "$2\r\n10\r\n",
+ util.ToPtr(uint(10)): "$2\r\n10\r\n",
+ uint8(10): "$2\r\n10\r\n",
+ util.ToPtr(uint8(10)): "$2\r\n10\r\n",
+ uint16(10): "$2\r\n10\r\n",
+ util.ToPtr(uint16(10)): "$2\r\n10\r\n",
+ uint32(10): "$2\r\n10\r\n",
+ util.ToPtr(uint32(10)): "$2\r\n10\r\n",
+ uint64(10): "$2\r\n10\r\n",
+ util.ToPtr(uint64(10)): "$2\r\n10\r\n",
+ float32(10.3): "$18\r\n10.300000190734863\r\n",
+ util.ToPtr(float32(10.3)): "$18\r\n10.300000190734863\r\n",
+ float64(10.3): "$4\r\n10.3\r\n",
+ util.ToPtr(float64(10.3)): "$4\r\n10.3\r\n",
+ bool(true): "$1\r\n1\r\n",
+ bool(false): "$1\r\n0\r\n",
+ util.ToPtr(bool(true)): "$1\r\n1\r\n",
+ util.ToPtr(bool(false)): "$1\r\n0\r\n",
+ }
+
+ for arg, expect := range args {
+ arg, expect := arg, expect
+ It(fmt.Sprintf("should write arg of type %T", arg), func() {
+ err := wr.WriteArg(arg)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(buf.String()).To(Equal(expect))
+ })
+ }
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/rand/rand.go
index 2edccba..2edccba 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/rand/rand.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/rand/rand.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util.go
index e34a7f0..ed81ad7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util.go
@@ -2,9 +2,10 @@ package internal
import (
"context"
+ "strings"
"time"
- "github.com/go-redis/redis/v8/internal/util"
+ "github.com/redis/go-redis/v9/internal/util"
)
func Sleep(ctx context.Context, dur time.Duration) error {
@@ -44,3 +45,22 @@ func isLower(s string) bool {
}
return true
}
+
+func ReplaceSpaces(s string) string {
+ // Pre-allocate a builder with the same length as s to minimize allocations.
+ // This is a basic optimization; adjust the initial size based on your use case.
+ var builder strings.Builder
+ builder.Grow(len(s))
+
+ for _, char := range s {
+ if char == ' ' {
+ // Replace space with a hyphen.
+ builder.WriteRune('-')
+ } else {
+ // Copy the character as-is.
+ builder.WriteRune(char)
+ }
+ }
+
+ return builder.String()
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/safe.go
index 2130711..8178f86 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/safe.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/safe.go
@@ -1,5 +1,4 @@
//go:build appengine
-// +build appengine
package util
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/strconv.go
index db50338..db50338 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/strconv.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/strconv.go
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go
new file mode 100644
index 0000000..a7ea712
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/type.go
@@ -0,0 +1,5 @@
+package util
+
+func ToPtr[T any](v T) *T {
+ return &v
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/unsafe.go
index daa8d76..cbcd2cc 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/internal/util/unsafe.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util/unsafe.go
@@ -1,5 +1,4 @@
//go:build !appengine
-// +build !appengine
package util
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go
new file mode 100644
index 0000000..f090eba
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal/util_test.go
@@ -0,0 +1,53 @@
+package internal
+
+import (
+ "strings"
+ "testing"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+func BenchmarkToLowerStd(b *testing.B) {
+ str := "AaBbCcDdEeFfGgHhIiJjKk"
+ for i := 0; i < b.N; i++ {
+ _ = strings.ToLower(str)
+ }
+}
+
+// util.ToLower is 3x faster than strings.ToLower.
+func BenchmarkToLowerInternal(b *testing.B) {
+ str := "AaBbCcDdEeFfGgHhIiJjKk"
+ for i := 0; i < b.N; i++ {
+ _ = ToLower(str)
+ }
+}
+
+func TestToLower(t *testing.T) {
+ It("toLower", func() {
+ str := "AaBbCcDdEeFfGg"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+
+ str = "ABCDE"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+
+ str = "ABCDE"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+
+ str = "abced"
+ Expect(ToLower(str)).To(Equal(strings.ToLower(str)))
+ })
+}
+
+func TestIsLower(t *testing.T) {
+ It("isLower", func() {
+ str := "AaBbCcDdEeFfGg"
+ Expect(isLower(str)).To(BeFalse())
+
+ str = "ABCDE"
+ Expect(isLower(str)).To(BeFalse())
+
+ str = "abcdefg"
+ Expect(isLower(str)).To(BeTrue())
+ })
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go
new file mode 100644
index 0000000..a631719
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/internal_test.go
@@ -0,0 +1,354 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+)
+
+var _ = Describe("newClusterState", func() {
+ var state *clusterState
+
+ createClusterState := func(slots []ClusterSlot) *clusterState {
+ opt := &ClusterOptions{}
+ opt.init()
+ nodes := newClusterNodes(opt)
+ state, err := newClusterState(nodes, slots, "10.10.10.10:1234")
+ Expect(err).NotTo(HaveOccurred())
+ return state
+ }
+
+ Describe("sorting", func() {
+ BeforeEach(func() {
+ state = createClusterState([]ClusterSlot{{
+ Start: 1000,
+ End: 1999,
+ }, {
+ Start: 0,
+ End: 999,
+ }, {
+ Start: 2000,
+ End: 2999,
+ }})
+ })
+
+ It("sorts slots", func() {
+ Expect(state.slots).To(Equal([]*clusterSlot{
+ {start: 0, end: 999, nodes: nil},
+ {start: 1000, end: 1999, nodes: nil},
+ {start: 2000, end: 2999, nodes: nil},
+ }))
+ })
+ })
+
+ Describe("loopback", func() {
+ BeforeEach(func() {
+ state = createClusterState([]ClusterSlot{{
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}},
+ }, {
+ Nodes: []ClusterNode{{Addr: ":1234"}},
+ }})
+ })
+
+ It("replaces loopback hosts in addresses", func() {
+ slotAddr := func(slot *clusterSlot) string {
+ return slot.nodes[0].Client.Options().Addr
+ }
+
+ Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001"))
+ Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002"))
+ Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234"))
+ Expect(slotAddr(state.slots[3])).To(Equal(":1234"))
+ })
+ })
+})
+
+type fixedHash string
+
+func (h fixedHash) Get(string) string {
+ return string(h)
+}
+
+func TestRingSetAddrsAndRebalanceRace(t *testing.T) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+
+ ringShard1Port = "6390"
+ ringShard2Port = "6391"
+ )
+
+ ring := NewRing(&RingOptions{
+ Addrs: map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ },
+ // Disable heartbeat
+ HeartbeatFrequency: 1 * time.Hour,
+ NewConsistentHash: func(shards []string) ConsistentHash {
+ switch len(shards) {
+ case 1:
+ return fixedHash(ringShard1Name)
+ case 2:
+ return fixedHash(ringShard2Name)
+ default:
+ t.Fatalf("Unexpected number of shards: %v", shards)
+ return nil
+ }
+ },
+ })
+ defer ring.Close()
+
+ // Continuously update addresses by adding and removing one address
+ updatesDone := make(chan struct{})
+ defer func() { close(updatesDone) }()
+ go func() {
+ for i := 0; ; i++ {
+ select {
+ case <-updatesDone:
+ return
+ default:
+ if i%2 == 0 {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ })
+ } else {
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ":" + ringShard1Port,
+ ringShard2Name: ":" + ringShard2Port,
+ })
+ }
+ }
+ }
+ }()
+
+ timer := time.NewTimer(1 * time.Second)
+ for running := true; running; {
+ select {
+ case <-timer.C:
+ running = false
+ default:
+ shard, err := ring.sharding.GetByKey("whatever")
+ if err == nil && shard == nil {
+ t.Fatal("shard is nil")
+ }
+ }
+ }
+}
+
+func BenchmarkRingShardingRebalanceLocked(b *testing.B) {
+ opts := &RingOptions{
+ Addrs: make(map[string]string),
+ // Disable heartbeat
+ HeartbeatFrequency: 1 * time.Hour,
+ }
+ for i := 0; i < 100; i++ {
+ opts.Addrs[fmt.Sprintf("shard%d", i)] = fmt.Sprintf(":63%02d", i)
+ }
+
+ ring := NewRing(opts)
+ defer ring.Close()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ring.sharding.rebalanceLocked()
+ }
+}
+
+type testCounter struct {
+ mu sync.Mutex
+ t *testing.T
+ m map[string]int
+}
+
+func newTestCounter(t *testing.T) *testCounter {
+ return &testCounter{t: t, m: make(map[string]int)}
+}
+
+func (ct *testCounter) increment(key string) {
+ ct.mu.Lock()
+ defer ct.mu.Unlock()
+ ct.m[key]++
+}
+
+func (ct *testCounter) expect(values map[string]int) {
+ ct.mu.Lock()
+ defer ct.mu.Unlock()
+ ct.t.Helper()
+ if !reflect.DeepEqual(values, ct.m) {
+ ct.t.Errorf("expected %v != actual %v", values, ct.m)
+ }
+}
+
+func TestRingShardsCleanup(t *testing.T) {
+ const (
+ ringShard1Name = "ringShardOne"
+ ringShard2Name = "ringShardTwo"
+
+ ringShard1Addr = "shard1.test"
+ ringShard2Addr = "shard2.test"
+ )
+
+ t.Run("closes unused shards", func(t *testing.T) {
+ closeCounter := newTestCounter(t)
+
+ ring := NewRing(&RingOptions{
+ Addrs: map[string]string{
+ ringShard1Name: ringShard1Addr,
+ ringShard2Name: ringShard2Addr,
+ },
+ NewClient: func(opt *Options) *Client {
+ c := NewClient(opt)
+ c.baseClient.onClose = func() error {
+ closeCounter.increment(opt.Addr)
+ return nil
+ }
+ return c
+ },
+ })
+ closeCounter.expect(map[string]int{})
+
+ // no change due to the same addresses
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ringShard1Addr,
+ ringShard2Name: ringShard2Addr,
+ })
+ closeCounter.expect(map[string]int{})
+
+ ring.SetAddrs(map[string]string{
+ ringShard1Name: ringShard1Addr,
+ })
+ closeCounter.expect(map[string]int{ringShard2Addr: 1})
+
+ ring.SetAddrs(map[string]string{
+ ringShard2Name: ringShard2Addr,
+ })
+ closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1})
+
+ ring.Close()
+ closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 2})
+ })
+
+ t.Run("closes created shards if ring was closed", func(t *testing.T) {
+ createCounter := newTestCounter(t)
+ closeCounter := newTestCounter(t)
+
+ var (
+ ring *Ring
+ shouldClose int32
+ )
+
+ ring = NewRing(&RingOptions{
+ Addrs: map[string]string{
+ ringShard1Name: ringShard1Addr,
+ },
+ NewClient: func(opt *Options) *Client {
+ if atomic.LoadInt32(&shouldClose) != 0 {
+ ring.Close()
+ }
+ createCounter.increment(opt.Addr)
+ c := NewClient(opt)
+ c.baseClient.onClose = func() error {
+ closeCounter.increment(opt.Addr)
+ return nil
+ }
+ return c
+ },
+ })
+ createCounter.expect(map[string]int{ringShard1Addr: 1})
+ closeCounter.expect(map[string]int{})
+
+ atomic.StoreInt32(&shouldClose, 1)
+
+ ring.SetAddrs(map[string]string{
+ ringShard2Name: ringShard2Addr,
+ })
+ createCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1})
+ closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1})
+ })
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutErr struct {
+ error
+}
+
+func (e timeoutErr) Timeout() bool {
+ return true
+}
+
+func (e timeoutErr) Temporary() bool {
+ return true
+}
+
+func (e timeoutErr) Error() string {
+ return "i/o timeout"
+}
+
+var _ = Describe("withConn", func() {
+ var client *Client
+
+ BeforeEach(func() {
+ client = NewClient(&Options{
+ PoolSize: 1,
+ })
+ })
+
+ AfterEach(func() {
+ client.Close()
+ })
+
+ It("should replace the connection in the pool when there is no error", func() {
+ var conn *pool.Conn
+
+ client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error {
+ conn = c
+ return nil
+ })
+
+ newConn, err := client.connPool.Get(ctx)
+ Expect(err).To(BeNil())
+ Expect(newConn).To(Equal(conn))
+ })
+
+ It("should replace the connection in the pool when there is an error not related to a bad connection", func() {
+ var conn *pool.Conn
+
+ client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error {
+ conn = c
+ return proto.RedisError("LOADING")
+ })
+
+ newConn, err := client.connPool.Get(ctx)
+ Expect(err).To(BeNil())
+ Expect(newConn).To(Equal(conn))
+ })
+
+ It("should remove the connection from the pool when it times out", func() {
+ var conn *pool.Conn
+
+ client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error {
+ conn = c
+ return timeoutErr{}
+ })
+
+ newConn, err := client.connPool.Get(ctx)
+ Expect(err).To(BeNil())
+ Expect(newConn).NotTo(Equal(conn))
+ Expect(client.connPool.Len()).To(Equal(1))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator.go
index 2f8bc2b..cd1a828 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator.go
@@ -2,30 +2,21 @@ package redis
import (
"context"
- "sync"
)
// ScanIterator is used to incrementally iterate over a collection of elements.
-// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
- mu sync.Mutex // protects Scanner and pos
cmd *ScanCmd
pos int
}
// Err returns the last iterator error, if any.
func (it *ScanIterator) Err() error {
- it.mu.Lock()
- err := it.cmd.Err()
- it.mu.Unlock()
- return err
+ return it.cmd.Err()
}
// Next advances the cursor and returns true if more values can be read.
func (it *ScanIterator) Next(ctx context.Context) bool {
- it.mu.Lock()
- defer it.mu.Unlock()
-
// Instantly return on errors.
if it.cmd.Err() != nil {
return false
@@ -68,10 +59,8 @@ func (it *ScanIterator) Next(ctx context.Context) bool {
// Val returns the key/field at the current cursor position.
func (it *ScanIterator) Val() string {
var v string
- it.mu.Lock()
if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
v = it.cmd.page[it.pos-1]
}
- it.mu.Unlock()
return v
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator_test.go
index 68c8b77..ccd9414 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/iterator_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/iterator_test.go
@@ -3,10 +3,10 @@ package redis_test
import (
"fmt"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("ScanIterator", func() {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go
new file mode 100644
index 0000000..ca731db
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json.go
@@ -0,0 +1,599 @@
+package redis
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// -------------------------------------------
+
+type JSONCmdable interface {
+ JSONArrAppend(ctx context.Context, key, path string, values ...interface{}) *IntSliceCmd
+ JSONArrIndex(ctx context.Context, key, path string, value ...interface{}) *IntSliceCmd
+ JSONArrIndexWithArgs(ctx context.Context, key, path string, options *JSONArrIndexArgs, value ...interface{}) *IntSliceCmd
+ JSONArrInsert(ctx context.Context, key, path string, index int64, values ...interface{}) *IntSliceCmd
+ JSONArrLen(ctx context.Context, key, path string) *IntSliceCmd
+ JSONArrPop(ctx context.Context, key, path string, index int) *StringSliceCmd
+ JSONArrTrim(ctx context.Context, key, path string) *IntSliceCmd
+ JSONArrTrimWithArgs(ctx context.Context, key, path string, options *JSONArrTrimArgs) *IntSliceCmd
+ JSONClear(ctx context.Context, key, path string) *IntCmd
+ JSONDebugMemory(ctx context.Context, key, path string) *IntCmd
+ JSONDel(ctx context.Context, key, path string) *IntCmd
+ JSONForget(ctx context.Context, key, path string) *IntCmd
+ JSONGet(ctx context.Context, key string, paths ...string) *JSONCmd
+ JSONGetWithArgs(ctx context.Context, key string, options *JSONGetArgs, paths ...string) *JSONCmd
+ JSONMerge(ctx context.Context, key, path string, value string) *StatusCmd
+ JSONMSetArgs(ctx context.Context, docs []JSONSetArgs) *StatusCmd
+ JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd
+ JSONMGet(ctx context.Context, path string, keys ...string) *JSONSliceCmd
+ JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd
+ JSONObjKeys(ctx context.Context, key, path string) *SliceCmd
+ JSONObjLen(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONSet(ctx context.Context, key, path string, value interface{}) *StatusCmd
+ JSONSetMode(ctx context.Context, key, path string, value interface{}, mode string) *StatusCmd
+ JSONStrAppend(ctx context.Context, key, path, value string) *IntPointerSliceCmd
+ JSONStrLen(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONToggle(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONType(ctx context.Context, key, path string) *JSONSliceCmd
+}
+
+type JSONSetArgs struct {
+ Key string
+ Path string
+ Value interface{}
+}
+
+type JSONArrIndexArgs struct {
+ Start int
+ Stop *int
+}
+
+type JSONArrTrimArgs struct {
+ Start int
+ Stop *int
+}
+
+type JSONCmd struct {
+ baseCmd
+ val string
+ expanded []interface{}
+}
+
+var _ Cmder = (*JSONCmd)(nil)
+
+func newJSONCmd(ctx context.Context, args ...interface{}) *JSONCmd {
+ return &JSONCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *JSONCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *JSONCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *JSONCmd) Val() string {
+ if len(cmd.val) == 0 && cmd.expanded != nil {
+ val, err := json.Marshal(cmd.expanded)
+ if err != nil {
+ cmd.SetErr(err)
+ return ""
+ }
+ return string(val)
+
+ } else {
+ return cmd.val
+ }
+}
+
+func (cmd *JSONCmd) Result() (string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd JSONCmd) Expanded() (interface{}, error) {
+ if len(cmd.val) != 0 && cmd.expanded == nil {
+ err := json.Unmarshal([]byte(cmd.val), &cmd.expanded)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return cmd.expanded, nil
+}
+
+func (cmd *JSONCmd) readReply(rd *proto.Reader) error {
+ // nil response from JSON.(M)GET (cmd.baseCmd.err will be "redis: nil")
+ if cmd.baseCmd.Err() == Nil {
+ cmd.val = ""
+ return Nil
+ }
+
+ if readType, err := rd.PeekReplyType(); err != nil {
+ return err
+ } else if readType == proto.RespArray {
+
+ size, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ expanded := make([]interface{}, size)
+
+ for i := 0; i < size; i++ {
+ if expanded[i], err = rd.ReadReply(); err != nil {
+ return err
+ }
+ }
+ cmd.expanded = expanded
+
+ } else {
+ if str, err := rd.ReadString(); err != nil && err != Nil {
+ return err
+ } else if str == "" || err == Nil {
+ cmd.val = ""
+ } else {
+ cmd.val = str
+ }
+ }
+
+ return nil
+}
+
+// -------------------------------------------
+
+type JSONSliceCmd struct {
+ baseCmd
+ val []interface{}
+}
+
+func NewJSONSliceCmd(ctx context.Context, args ...interface{}) *JSONSliceCmd {
+ return &JSONSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *JSONSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *JSONSliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *JSONSliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *JSONSliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *JSONSliceCmd) readReply(rd *proto.Reader) error {
+ if cmd.baseCmd.Err() == Nil {
+ cmd.val = nil
+ return Nil
+ }
+
+ if readType, err := rd.PeekReplyType(); err != nil {
+ return err
+ } else if readType == proto.RespArray {
+ response, err := rd.ReadReply()
+ if err != nil {
+ return nil
+ } else {
+ cmd.val = response.([]interface{})
+ }
+
+ } else {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]interface{}, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ }
+ return nil
+}
+
+/*******************************************************************************
+*
+* IntPointerSliceCmd
+* used to represent a RedisJSON response where the result is either an integer or nil
+*
+*******************************************************************************/
+
+type IntPointerSliceCmd struct {
+ baseCmd
+ val []*int64
+}
+
+// NewIntPointerSliceCmd initialises an IntPointerSliceCmd
+func NewIntPointerSliceCmd(ctx context.Context, args ...interface{}) *IntPointerSliceCmd {
+ return &IntPointerSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntPointerSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntPointerSliceCmd) SetVal(val []*int64) {
+ cmd.val = val
+}
+
+func (cmd *IntPointerSliceCmd) Val() []*int64 {
+ return cmd.val
+}
+
+func (cmd *IntPointerSliceCmd) Result() ([]*int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntPointerSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*int64, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ val, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ } else if err != Nil {
+ cmd.val[i] = &val
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// JSONArrAppend adds the provided JSON values to the end of the array at the given path.
+// For more information, see https://redis.io/commands/json.arrappend
+func (c cmdable) JSONArrAppend(ctx context.Context, key, path string, values ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRAPPEND", key, path}
+ args = append(args, values...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrIndex searches for the first occurrence of the provided JSON value in the array at the given path.
+// For more information, see https://redis.io/commands/json.arrindex
+func (c cmdable) JSONArrIndex(ctx context.Context, key, path string, value ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINDEX", key, path}
+ args = append(args, value...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrIndexWithArgs searches for the first occurrence of a JSON value in an array while allowing the start and
+// stop options to be provided.
+// For more information, see https://redis.io/commands/json.arrindex
+func (c cmdable) JSONArrIndexWithArgs(ctx context.Context, key, path string, options *JSONArrIndexArgs, value ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINDEX", key, path}
+ args = append(args, value...)
+
+ if options != nil {
+ args = append(args, options.Start)
+ if options.Stop != nil {
+ args = append(args, *options.Stop)
+ }
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrInsert inserts the JSON values into the array at the specified path before the index (shifts to the right).
+// For more information, see https://redis.io/commands/json.arrinsert
+func (c cmdable) JSONArrInsert(ctx context.Context, key, path string, index int64, values ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINSERT", key, path, index}
+ args = append(args, values...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrLen reports the length of the JSON array at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.arrlen
+func (c cmdable) JSONArrLen(ctx context.Context, key, path string) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRLEN", key, path}
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrPop removes and returns an element from the specified index in the array.
+// For more information, see https://redis.io/commands/json.arrpop
+func (c cmdable) JSONArrPop(ctx context.Context, key, path string, index int) *StringSliceCmd {
+ args := []interface{}{"JSON.ARRPOP", key, path, index}
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrTrim trims an array to contain only the specified inclusive range of elements.
+// For more information, see https://redis.io/commands/json.arrtrim
+func (c cmdable) JSONArrTrim(ctx context.Context, key, path string) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRTRIM", key, path}
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrTrimWithArgs trims an array to contain only the specified inclusive range of elements.
+// For more information, see https://redis.io/commands/json.arrtrim
+func (c cmdable) JSONArrTrimWithArgs(ctx context.Context, key, path string, options *JSONArrTrimArgs) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRTRIM", key, path}
+
+ if options != nil {
+ args = append(args, options.Start)
+
+ if options.Stop != nil {
+ args = append(args, *options.Stop)
+ }
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONClear clears container values (arrays/objects) and sets numeric values to 0.
+// For more information, see https://redis.io/commands/json.clear
+func (c cmdable) JSONClear(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.CLEAR", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONDebugMemory reports a value's memory usage in bytes (unimplemented)
+// For more information, see https://redis.io/commands/json.debug-memory
+func (c cmdable) JSONDebugMemory(ctx context.Context, key, path string) *IntCmd {
+ panic("not implemented")
+}
+
+// JSONDel deletes a value.
+// For more information, see https://redis.io/commands/json.del
+func (c cmdable) JSONDel(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.DEL", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONForget deletes a value.
+// For more information, see https://redis.io/commands/json.forget
+func (c cmdable) JSONForget(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.FORGET", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONGet returns the value at path in JSON serialized form. JSON.GET returns an
+// array of strings. This function parses out the wrapping array but leaves the
+// internal strings unprocessed by default (see Val())
+// For more information - https://redis.io/commands/json.get/
+func (c cmdable) JSONGet(ctx context.Context, key string, paths ...string) *JSONCmd {
+ args := make([]interface{}, len(paths)+2)
+ args[0] = "JSON.GET"
+ args[1] = key
+ for n, path := range paths {
+ args[n+2] = path
+ }
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type JSONGetArgs struct {
+ Indent string
+ Newline string
+ Space string
+}
+
+// JSONGetWithArgs - Retrieves the value of a key from a JSON document.
+// This function also allows for specifying additional options such as:
+// Indention, NewLine and Space
+// For more information - https://redis.io/commands/json.get/
+func (c cmdable) JSONGetWithArgs(ctx context.Context, key string, options *JSONGetArgs, paths ...string) *JSONCmd {
+ args := []interface{}{"JSON.GET", key}
+ if options != nil {
+ if options.Indent != "" {
+ args = append(args, "INDENT", options.Indent)
+ }
+ if options.Newline != "" {
+ args = append(args, "NEWLINE", options.Newline)
+ }
+ if options.Space != "" {
+ args = append(args, "SPACE", options.Space)
+ }
+ for _, path := range paths {
+ args = append(args, path)
+ }
+ }
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMerge merges a given JSON value into matching paths.
+// For more information, see https://redis.io/commands/json.merge
+func (c cmdable) JSONMerge(ctx context.Context, key, path string, value string) *StatusCmd {
+ args := []interface{}{"JSON.MERGE", key, path, value}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMGet returns the values at the specified path from multiple key arguments.
+// Note - the arguments are reversed when compared with `JSON.MGET` as we want
+// to follow the pattern of having the last argument be variable.
+// For more information, see https://redis.io/commands/json.mget
+func (c cmdable) JSONMGet(ctx context.Context, path string, keys ...string) *JSONSliceCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "JSON.MGET"
+ for n, key := range keys {
+ args[n+1] = key
+ }
+ args = append(args, path)
+ cmd := NewJSONSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMSetArgs sets or updates one or more JSON values according to the specified key-path-value triplets.
+// For more information, see https://redis.io/commands/json.mset
+func (c cmdable) JSONMSetArgs(ctx context.Context, docs []JSONSetArgs) *StatusCmd {
+ args := []interface{}{"JSON.MSET"}
+ for _, doc := range docs {
+ args = append(args, doc.Key, doc.Path, doc.Value)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd {
+ args := []interface{}{"JSON.MSET"}
+ args = append(args, params...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONNumIncrBy increments the number value stored at the specified path by the provided number.
+// For more information, see https://redis.io/commands/json.numincreby
+func (c cmdable) JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd {
+ args := []interface{}{"JSON.NUMINCRBY", key, path, value}
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONObjKeys returns the keys in the object that's referenced by the specified path.
+// For more information, see https://redis.io/commands/json.objkeys
+func (c cmdable) JSONObjKeys(ctx context.Context, key, path string) *SliceCmd {
+ args := []interface{}{"JSON.OBJKEYS", key, path}
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONObjLen reports the number of keys in the JSON object at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.objlen
+func (c cmdable) JSONObjLen(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.OBJLEN", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONSet sets the JSON value at the given path in the given key. The value must be something that
+// can be marshaled to JSON (using encoding/JSON) unless the argument is a string or a []byte when we assume that
+// it can be passed directly as JSON.
+// For more information, see https://redis.io/commands/json.set
+func (c cmdable) JSONSet(ctx context.Context, key, path string, value interface{}) *StatusCmd {
+ return c.JSONSetMode(ctx, key, path, value, "")
+}
+
+// JSONSetMode sets the JSON value at the given path in the given key and allows the mode to be set
+// (the mode value must be "XX" or "NX"). The value must be something that can be marshaled to JSON (using encoding/JSON) unless
+// the argument is a string or []byte when we assume that it can be passed directly as JSON.
+// For more information, see https://redis.io/commands/json.set
+func (c cmdable) JSONSetMode(ctx context.Context, key, path string, value interface{}, mode string) *StatusCmd {
+ var bytes []byte
+ var err error
+ switch v := value.(type) {
+ case string:
+ bytes = []byte(v)
+ case []byte:
+ bytes = v
+ default:
+ bytes, err = json.Marshal(v)
+ }
+ args := []interface{}{"JSON.SET", key, path, util.BytesToString(bytes)}
+ if mode != "" {
+ switch strings.ToUpper(mode) {
+ case "XX", "NX":
+ args = append(args, strings.ToUpper(mode))
+
+ default:
+ panic("redis: JSON.SET mode must be NX or XX")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ _ = c(ctx, cmd)
+ }
+ return cmd
+}
+
+// JSONStrAppend appends the JSON-string values to the string at the specified path.
+// For more information, see https://redis.io/commands/json.strappend
+func (c cmdable) JSONStrAppend(ctx context.Context, key, path, value string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.STRAPPEND", key, path, value}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONStrLen reports the length of the JSON String at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.strlen
+func (c cmdable) JSONStrLen(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.STRLEN", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONToggle toggles a Boolean value stored at the specified path.
+// For more information, see https://redis.io/commands/json.toggle
+func (c cmdable) JSONToggle(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.TOGGLE", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONType reports the type of JSON value at the specified path.
+// For more information, see https://redis.io/commands/json.type
+func (c cmdable) JSONType(ctx context.Context, key, path string) *JSONSliceCmd {
+ args := []interface{}{"JSON.TYPE", key, path}
+ cmd := NewJSONSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go
new file mode 100644
index 0000000..4e9718a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/json_test.go
@@ -0,0 +1,660 @@
+package redis_test
+
+import (
+ "context"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+type JSONGetTestStruct struct {
+ Hello string `json:"hello"`
+}
+
+var _ = Describe("JSON Commands", Label("json"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushAll(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ Describe("arrays", Label("arrays"), func() {
+ It("should JSONArrAppend", Label("json.arrappend", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "append2", "$", `{"a": [10], "b": {"a": [12, 13]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrAppend(ctx, "append2", "$..a", 10)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]int64{2, 3}))
+ })
+
+ It("should JSONArrIndex and JSONArrIndexWithArgs", Label("json.arrindex", "json"), func() {
+ cmd1, err := client.JSONSet(ctx, "index1", "$", `{"a": [10], "b": {"a": [12, 10]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+
+ cmd2, err := client.JSONArrIndex(ctx, "index1", "$.b.a", 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]int64{1}))
+
+ cmd3, err := client.JSONSet(ctx, "index2", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd3).To(Equal("OK"))
+
+ res, err := client.JSONArrIndex(ctx, "index2", "$", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(1)))
+
+ res, err = client.JSONArrIndex(ctx, "index2", "$", 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(-1)))
+
+ res, err = client.JSONArrIndex(ctx, "index2", "$", 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(4)))
+
+ res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{}, 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(4)))
+
+ stop := 5000
+ res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{Stop: &stop}, 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(4)))
+
+ stop = -1
+ res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{Stop: &stop}, 4).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res[0]).To(Equal(int64(-1)))
+ })
+
+ It("should JSONArrIndex and JSONArrIndexWithArgs with $", Label("json.arrindex", "json"), func() {
+ doc := `{
+ "store": {
+ "book": [
+ {
+ "category": "reference",
+ "author": "Nigel Rees",
+ "title": "Sayings of the Century",
+ "price": 8.95,
+ "size": [10, 20, 30, 40]
+ },
+ {
+ "category": "fiction",
+ "author": "Evelyn Waugh",
+ "title": "Sword of Honour",
+ "price": 12.99,
+ "size": [50, 60, 70, 80]
+ },
+ {
+ "category": "fiction",
+ "author": "Herman Melville",
+ "title": "Moby Dick",
+ "isbn": "0-553-21311-3",
+ "price": 8.99,
+ "size": [5, 10, 20, 30]
+ },
+ {
+ "category": "fiction",
+ "author": "J. R. R. Tolkien",
+ "title": "The Lord of the Rings",
+ "isbn": "0-395-19395-8",
+ "price": 22.99,
+ "size": [5, 6, 7, 8]
+ }
+ ],
+ "bicycle": {"color": "red", "price": 19.95}
+ }
+ }`
+ res, err := client.JSONSet(ctx, "doc1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ resGet, err := client.JSONGet(ctx, "doc1", "$.store.book[?(@.price<10)].size").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal("[[10,20,30,40],[5,10,20,30]]"))
+
+ resArr, err := client.JSONArrIndex(ctx, "doc1", "$.store.book[?(@.price<10)].size", 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resArr).To(Equal([]int64{1, 2}))
+ })
+
+ It("should JSONArrInsert", Label("json.arrinsert", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "insert2", "$", `[100, 200, 300, 200]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrInsert(ctx, "insert2", "$", -1, 1, 2)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]int64{6}))
+
+ cmd3 := client.JSONGet(ctx, "insert2")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ // RESP2 vs RESP3
+ Expect(cmd3.Val()).To(Or(
+ Equal(`[100,200,300,1,2,200]`),
+ Equal(`[[100,200,300,1,2,200]]`)))
+ })
+
+ It("should JSONArrLen", Label("json.arrlen", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "length2", "$", `{"a": [10], "b": {"a": [12, 10, 20, 12, 90, 10]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrLen(ctx, "length2", "$..a")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]int64{1, 6}))
+ })
+
+ It("should JSONArrPop", Label("json.arrpop"), func() {
+ cmd1 := client.JSONSet(ctx, "pop4", "$", `[100, 200, 300, 200]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrPop(ctx, "pop4", "$", 2)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]string{"300"}))
+
+ cmd3 := client.JSONGet(ctx, "pop4", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal("[[100,200,200]]"))
+ })
+
+ It("should JSONArrTrim", Label("json.arrtrim", "json"), func() {
+ cmd1, err := client.JSONSet(ctx, "trim1", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+
+ stop := 3
+ cmd2, err := client.JSONArrTrimWithArgs(ctx, "trim1", "$", &redis.JSONArrTrimArgs{Start: 1, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]int64{3}))
+
+ res, err := client.JSONGet(ctx, "trim1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[[1,2,3]]`))
+
+ cmd3, err := client.JSONSet(ctx, "trim2", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd3).To(Equal("OK"))
+
+ stop = 3
+ cmd4, err := client.JSONArrTrimWithArgs(ctx, "trim2", "$", &redis.JSONArrTrimArgs{Start: -1, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd4).To(Equal([]int64{0}))
+
+ cmd5, err := client.JSONSet(ctx, "trim3", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd5).To(Equal("OK"))
+
+ stop = 99
+ cmd6, err := client.JSONArrTrimWithArgs(ctx, "trim3", "$", &redis.JSONArrTrimArgs{Start: 3, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd6).To(Equal([]int64{2}))
+
+ cmd7, err := client.JSONSet(ctx, "trim4", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd7).To(Equal("OK"))
+
+ stop = 1
+ cmd8, err := client.JSONArrTrimWithArgs(ctx, "trim4", "$", &redis.JSONArrTrimArgs{Start: 9, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd8).To(Equal([]int64{0}))
+
+ cmd9, err := client.JSONSet(ctx, "trim5", "$", `[0,1,2,3,4]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd9).To(Equal("OK"))
+
+ stop = 11
+ cmd10, err := client.JSONArrTrimWithArgs(ctx, "trim5", "$", &redis.JSONArrTrimArgs{Start: 9, Stop: &stop}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd10).To(Equal([]int64{0}))
+ })
+
+ It("should JSONArrPop", Label("json.arrpop", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "pop4", "$", `[100, 200, 300, 200]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONArrPop(ctx, "pop4", "$", 2)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal([]string{"300"}))
+
+ cmd3 := client.JSONGet(ctx, "pop4", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal("[[100,200,200]]"))
+ })
+ })
+
+ Describe("get/set", Label("getset"), func() {
+ It("should JSONSet", Label("json.set", "json"), func() {
+ cmd := client.JSONSet(ctx, "set1", "$", `{"a": 1, "b": 2, "hello": "world"}`)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal("OK"))
+ })
+
+ It("should JSONGet", Label("json.get", "json"), func() {
+ res, err := client.JSONSet(ctx, "get3", "$", `{"a": 1, "b": 2}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[-{--"a":1,--"b":2-}]`))
+
+ res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-", Newline: `~`, Space: `!`}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[~-{~--"a":!1,~--"b":!2~-}~]`))
+ })
+
+ It("should JSONMerge", Label("json.merge", "json"), func() {
+ res, err := client.JSONSet(ctx, "merge1", "$", `{"a": 1, "b": 2}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONMerge(ctx, "merge1", "$", `{"b": 3, "c": 4}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONGet(ctx, "merge1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[{"a":1,"b":3,"c":4}]`))
+ })
+
+ It("should JSONMSet", Label("json.mset", "json", "NonRedisEnterprise"), func() {
+ doc1 := redis.JSONSetArgs{Key: "mset1", Path: "$", Value: `{"a": 1}`}
+ doc2 := redis.JSONSetArgs{Key: "mset2", Path: "$", Value: 2}
+ docs := []redis.JSONSetArgs{doc1, doc2}
+
+ mSetResult, err := client.JSONMSetArgs(ctx, docs).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(mSetResult).To(Equal("OK"))
+
+ res, err := client.JSONMGet(ctx, "$", "mset1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]interface{}{`[{"a":1}]`}))
+
+ res, err = client.JSONMGet(ctx, "$", "mset1", "mset2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal([]interface{}{`[{"a":1}]`, "[2]"}))
+
+ _, err = client.JSONMSet(ctx, "mset1", "$.a", 2, "mset3", "$", `[1]`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should JSONMGet", Label("json.mget", "json", "NonRedisEnterprise"), func() {
+ cmd1 := client.JSONSet(ctx, "mget2a", "$", `{"a": ["aa", "ab", "ac", "ad"], "b": {"a": ["ba", "bb", "bc", "bd"]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+ cmd2 := client.JSONSet(ctx, "mget2b", "$", `{"a": [100, 200, 300, 200], "b": {"a": [100, 200, 300, 200]}}`)
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal("OK"))
+
+ cmd3 := client.JSONMGet(ctx, "$..a", "mget2a", "mget2b")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(HaveLen(2))
+ Expect(cmd3.Val()[0]).To(Equal(`[["aa","ab","ac","ad"],["ba","bb","bc","bd"]]`))
+ Expect(cmd3.Val()[1]).To(Equal(`[[100,200,300,200],[100,200,300,200]]`))
+ })
+
+ It("should JSONMget with $", Label("json.mget", "json", "NonRedisEnterprise"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": 1, "b": 2, "nested": {"a": 3}, "c": "", "nested2": {"a": ""}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": 4, "b": 5, "nested": {"a": 6}, "c": "", "nested2": {"a": [""]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONMGet(ctx, "$..a", "doc1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal([]interface{}{`[1,3,""]`}))
+
+ iRes, err = client.JSONMGet(ctx, "$..a", "doc1", "doc2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal([]interface{}{`[1,3,""]`, `[4,6,[""]]`}))
+
+ iRes, err = client.JSONMGet(ctx, "$..a", "non_existing_doc", "non_existing_doc1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal([]interface{}{nil, nil}))
+ })
+ })
+
+ Describe("Misc", Label("misc"), func() {
+ It("should JSONClear", Label("json.clear", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "clear1", "$", `[1]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONClear(ctx, "clear1", "$")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(int64(1)))
+
+ cmd3 := client.JSONGet(ctx, "clear1", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal(`[[]]`))
+ })
+
+ It("should JSONClear with $", Label("json.clear", "json"), func() {
+ doc := `{
+ "nested1": {"a": {"foo": 10, "bar": 20}},
+ "a": ["foo"],
+ "nested2": {"a": "claro"},
+ "nested3": {"a": {"baz": 50}}
+ }`
+ res, err := client.JSONSet(ctx, "doc1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONClear(ctx, "doc1", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(3)))
+
+ resGet, err := client.JSONGet(ctx, "doc1", `$`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested1":{"a":{}},"a":[],"nested2":{"a":"claro"},"nested3":{"a":{}}}]`))
+
+ res, err = client.JSONSet(ctx, "doc1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONClear(ctx, "doc1", "$.nested1.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(1)))
+
+ resGet, err = client.JSONGet(ctx, "doc1", `$`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested1":{"a":{}},"a":["foo"],"nested2":{"a":"claro"},"nested3":{"a":{"baz":50}}}]`))
+ })
+
+ It("should JSONDel", Label("json.del", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "del1", "$", `[1]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONDel(ctx, "del1", "$")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(int64(1)))
+
+ cmd3 := client.JSONGet(ctx, "del1", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(HaveLen(0))
+ })
+
+ It("should JSONDel with $", Label("json.del", "json"), func() {
+ res, err := client.JSONSet(ctx, "del1", "$", `{"a": 1, "nested": {"a": 2, "b": 3}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONDel(ctx, "del1", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(2)))
+
+ resGet, err := client.JSONGet(ctx, "del1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":3}}]`))
+
+ res, err = client.JSONSet(ctx, "del2", "$", `{"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [true, "a", "b"]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONDel(ctx, "del2", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(1)))
+
+ resGet, err = client.JSONGet(ctx, "del2", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":[true,"a","b"]},"b":["a","b"]}]`))
+
+ doc := `[
+ {
+ "ciao": ["non ancora"],
+ "nested": [
+ {"ciao": [1, "a"]},
+ {"ciao": [2, "a"]},
+ {"ciaoc": [3, "non", "ciao"]},
+ {"ciao": [4, "a"]},
+ {"e": [5, "non", "ciao"]}
+ ]
+ }
+ ]`
+ res, err = client.JSONSet(ctx, "del3", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONDel(ctx, "del3", `$.[0]["nested"]..ciao`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(3)))
+
+ resVal := `[[{"ciao":["non ancora"],"nested":[{},{},{"ciaoc":[3,"non","ciao"]},{},{"e":[5,"non","ciao"]}]}]]`
+ resGet, err = client.JSONGet(ctx, "del3", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(resVal))
+ })
+
+ It("should JSONForget", Label("json.forget", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "forget3", "$", `{"a": [1,2,3], "b": {"a": [1,2,3], "b": "annie"}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONForget(ctx, "forget3", "$..a")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(int64(2)))
+
+ cmd3 := client.JSONGet(ctx, "forget3", "$")
+ Expect(cmd3.Err()).NotTo(HaveOccurred())
+ Expect(cmd3.Val()).To(Equal(`[{"b":{"b":"annie"}}]`))
+ })
+
+ It("should JSONForget with $", Label("json.forget", "json"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": 1, "nested": {"a": 2, "b": 3}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err := client.JSONForget(ctx, "doc1", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(2)))
+
+ resGet, err := client.JSONGet(ctx, "doc1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":3}}]`))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [true, "a", "b"]}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONForget(ctx, "doc2", "$..a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(1)))
+
+ resGet, err = client.JSONGet(ctx, "doc2", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(`[{"nested":{"b":[true,"a","b"]},"b":["a","b"]}]`))
+
+ doc := `[
+ {
+ "ciao": ["non ancora"],
+ "nested": [
+ {"ciao": [1, "a"]},
+ {"ciao": [2, "a"]},
+ {"ciaoc": [3, "non", "ciao"]},
+ {"ciao": [4, "a"]},
+ {"e": [5, "non", "ciao"]}
+ ]
+ }
+ ]`
+ res, err = client.JSONSet(ctx, "doc3", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ iRes, err = client.JSONForget(ctx, "doc3", `$.[0]["nested"]..ciao`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(iRes).To(Equal(int64(3)))
+
+ resVal := `[[{"ciao":["non ancora"],"nested":[{},{},{"ciaoc":[3,"non","ciao"]},{},{"e":[5,"non","ciao"]}]}]]`
+ resGet, err = client.JSONGet(ctx, "doc3", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resGet).To(Equal(resVal))
+ })
+
+ It("should JSONNumIncrBy", Label("json.numincrby", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "incr3", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONNumIncrBy(ctx, "incr3", "$..a[1]", float64(1))
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(Equal(`[3,0]`))
+ })
+
+ It("should JSONNumIncrBy with $", Label("json.numincrby", "json"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONNumIncrBy(ctx, "doc1", "$.b[1].a", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[7]`))
+
+ res, err = client.JSONNumIncrBy(ctx, "doc1", "$.b[1].a", 3.5).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[10.5]`))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ res, err = client.JSONNumIncrBy(ctx, "doc2", "$.b[0].a", 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal(`[5]`))
+ })
+
+ It("should JSONObjKeys", Label("json.objkeys", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "objkeys1", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONObjKeys(ctx, "objkeys1", "$..*")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(7))
+ Expect(cmd2.Val()).To(Equal([]interface{}{nil, []interface{}{"a"}, nil, nil, nil, nil, nil}))
+ })
+
+ It("should JSONObjKeys with $", Label("json.objkeys", "json"), func() {
+ doc := `{
+ "nested1": {"a": {"foo": 10, "bar": 20}},
+ "a": ["foo"],
+ "nested2": {"a": {"baz": 50}}
+ }`
+ cmd1, err := client.JSONSet(ctx, "objkeys1", "$", doc).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+
+ cmd2, err := client.JSONObjKeys(ctx, "objkeys1", "$.nested1.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]interface{}{[]interface{}{"foo", "bar"}}))
+
+ cmd2, err = client.JSONObjKeys(ctx, "objkeys1", ".*.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]interface{}{"foo", "bar"}))
+
+ cmd2, err = client.JSONObjKeys(ctx, "objkeys1", ".nested2.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd2).To(Equal([]interface{}{"baz"}))
+
+ _, err = client.JSONObjKeys(ctx, "non_existing_doc", "..a").Result()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("should JSONObjLen", Label("json.objlen", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "objlen2", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONObjLen(ctx, "objlen2", "$..*")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(7))
+ Expect(cmd2.Val()[0]).To(BeNil())
+ Expect(*cmd2.Val()[1]).To(Equal(int64(1)))
+ })
+
+ It("should JSONStrLen", Label("json.strlen", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "strlen2", "$", `{"a": "alice", "b": "bob", "c": {"a": "alice", "b": "bob"}}`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONStrLen(ctx, "strlen2", "$..*")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(5))
+ var tmp int64 = 20
+ Expect(cmd2.Val()[0]).To(BeAssignableToTypeOf(&tmp))
+ Expect(*cmd2.Val()[0]).To(Equal(int64(5)))
+ Expect(*cmd2.Val()[1]).To(Equal(int64(3)))
+ Expect(cmd2.Val()[2]).To(BeNil())
+ Expect(*cmd2.Val()[3]).To(Equal(int64(5)))
+ Expect(*cmd2.Val()[4]).To(Equal(int64(3)))
+ })
+
+ It("should JSONStrAppend", Label("json.strappend", "json"), func() {
+ cmd1, err := client.JSONSet(ctx, "strapp1", "$", `"foo"`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd1).To(Equal("OK"))
+ cmd2, err := client.JSONStrAppend(ctx, "strapp1", "$", `"bar"`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(*cmd2[0]).To(Equal(int64(6)))
+ cmd3, err := client.JSONGet(ctx, "strapp1", "$").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmd3).To(Equal(`["foobar"]`))
+ })
+
+ It("should JSONStrAppend and JSONStrLen with $", Label("json.strappend", "json.strlen", "json"), func() {
+ res, err := client.JSONSet(ctx, "doc1", "$", `{"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ intArrayResult, err := client.JSONStrAppend(ctx, "doc1", "$.nested1.a", `"baz"`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(*intArrayResult[0]).To(Equal(int64(8)))
+
+ res, err = client.JSONSet(ctx, "doc2", "$", `{"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}`).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+
+ intResult, err := client.JSONStrLen(ctx, "doc2", "$.nested1.a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(*intResult[0]).To(Equal(int64(5)))
+ })
+
+ It("should JSONToggle", Label("json.toggle", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "toggle1", "$", `[true]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONToggle(ctx, "toggle1", "$[0]")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(1))
+ Expect(*cmd2.Val()[0]).To(Equal(int64(0)))
+ })
+
+ It("should JSONType", Label("json.type", "json"), func() {
+ cmd1 := client.JSONSet(ctx, "type1", "$", `[true]`)
+ Expect(cmd1.Err()).NotTo(HaveOccurred())
+ Expect(cmd1.Val()).To(Equal("OK"))
+
+ cmd2 := client.JSONType(ctx, "type1", "$[0]")
+ Expect(cmd2.Err()).NotTo(HaveOccurred())
+ Expect(cmd2.Val()).To(HaveLen(1))
+ // RESP2 v RESP3
+ Expect(cmd2.Val()[0]).To(Or(Equal([]interface{}{"boolean"}), Equal("boolean")))
+ })
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go
new file mode 100644
index 0000000..24a0de0
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/list_commands.go
@@ -0,0 +1,289 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type ListCmdable interface {
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+ BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
+}
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "blmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// LMPop Pops one or more elements from the first non-empty list key from the list of provided key names.
+// direction: left or right, count: > 0
+// example: client.LMPop(ctx, "left", 3, "key1", "key2")
+func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "lmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMove(
+ ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+ cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/main_test.go
index 5414310..19e9444 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/main_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/main_test.go
@@ -1,26 +1,23 @@
package redis_test
import (
- "context"
- "errors"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
+ "strconv"
"sync"
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
const (
- redisPort = "6380"
- redisAddr = ":" + redisPort
redisSecondaryPort = "6381"
)
@@ -41,6 +38,16 @@ const (
)
var (
+ redisPort = "6380"
+ redisAddr = ":" + redisPort
+)
+
+var (
+ rediStackPort = "6379"
+ rediStackAddr = ":" + rediStackPort
+)
+
+var (
sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
processes map[string]*redisProcess
@@ -58,6 +65,8 @@ var cluster = &clusterScenario{
clients: make(map[string]*redis.Client, 6),
}
+var RECluster = false
+
func registerProcess(port string, p *redisProcess) {
if processes == nil {
processes = make(map[string]*redisProcess)
@@ -66,48 +75,62 @@ func registerProcess(port string, p *redisProcess) {
}
var _ = BeforeSuite(func() {
+ addr := os.Getenv("REDIS_PORT")
+ if addr != "" {
+ redisPort = addr
+ redisAddr = ":" + redisPort
+ }
var err error
+ RECluster, _ = strconv.ParseBool(os.Getenv("RE_CLUSTER"))
- redisMain, err = startRedis(redisPort)
- Expect(err).NotTo(HaveOccurred())
+ if !RECluster {
- ringShard1, err = startRedis(ringShard1Port)
- Expect(err).NotTo(HaveOccurred())
+ redisMain, err = startRedis(redisPort)
+ Expect(err).NotTo(HaveOccurred())
- ringShard2, err = startRedis(ringShard2Port)
- Expect(err).NotTo(HaveOccurred())
+ ringShard1, err = startRedis(ringShard1Port)
+ Expect(err).NotTo(HaveOccurred())
- ringShard3, err = startRedis(ringShard3Port)
- Expect(err).NotTo(HaveOccurred())
+ ringShard2, err = startRedis(ringShard2Port)
+ Expect(err).NotTo(HaveOccurred())
- sentinelMaster, err = startRedis(sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ ringShard3, err = startRedis(ringShard3Port)
+ Expect(err).NotTo(HaveOccurred())
- sentinel1, err = startSentinel(sentinelPort1, sentinelName, sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinelMaster, err = startRedis(sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinel2, err = startSentinel(sentinelPort2, sentinelName, sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinel1, err = startSentinel(sentinelPort1, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinel3, err = startSentinel(sentinelPort3, sentinelName, sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinel2, err = startSentinel(sentinelPort2, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinelSlave1, err = startRedis(
- sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinel3, err = startSentinel(sentinelPort3, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- sentinelSlave2, err = startRedis(
- sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
- Expect(err).NotTo(HaveOccurred())
+ sentinelSlave1, err = startRedis(
+ sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
- Expect(startCluster(ctx, cluster)).NotTo(HaveOccurred())
+ sentinelSlave2, err = startRedis(
+ sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(startCluster(ctx, cluster)).NotTo(HaveOccurred())
+ } else {
+ redisPort = rediStackPort
+ redisAddr = rediStackAddr
+ }
})
var _ = AfterSuite(func() {
- Expect(cluster.Close()).NotTo(HaveOccurred())
+ if !RECluster {
+ Expect(cluster.Close()).NotTo(HaveOccurred())
- for _, p := range processes {
- Expect(p.Close()).NotTo(HaveOccurred())
+ for _, p := range processes {
+ Expect(p.Close()).NotTo(HaveOccurred())
+ }
}
processes = nil
})
@@ -120,20 +143,37 @@ func TestGinkgoSuite(t *testing.T) {
//------------------------------------------------------------------------------
func redisOptions() *redis.Options {
+ if RECluster {
+ return &redis.Options{
+ Addr: redisAddr,
+ DB: 0,
+
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ ContextTimeoutEnabled: true,
+
+ MaxRetries: -1,
+ PoolSize: 10,
+
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
+ }
+ }
return &redis.Options{
Addr: redisAddr,
DB: 15,
- DialTimeout: 10 * time.Second,
- ReadTimeout: 30 * time.Second,
- WriteTimeout: 30 * time.Second,
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ ContextTimeoutEnabled: true,
MaxRetries: -1,
- PoolSize: 10,
- PoolTimeout: 30 * time.Second,
- IdleTimeout: time.Minute,
- IdleCheckFrequency: 100 * time.Millisecond,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
}
}
@@ -145,10 +185,9 @@ func redisClusterOptions() *redis.ClusterOptions {
MaxRedirects: 8,
- PoolSize: 10,
- PoolTimeout: 30 * time.Second,
- IdleTimeout: time.Minute,
- IdleCheckFrequency: 100 * time.Millisecond,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
}
}
@@ -165,10 +204,9 @@ func redisRingOptions() *redis.RingOptions {
MaxRetries: -1,
- PoolSize: 10,
- PoolTimeout: 30 * time.Second,
- IdleTimeout: time.Minute,
- IdleCheckFrequency: 100 * time.Millisecond,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ ConnMaxIdleTime: time.Minute,
}
}
@@ -272,7 +310,7 @@ func (p *redisProcess) Close() error {
if err := p.Client.Ping(ctx).Err(); err != nil {
return nil
}
- return errors.New("client is not shutdown")
+ return fmt.Errorf("client %s is not shutdown", p.Options().Addr)
}, 10*time.Second)
if err != nil {
return err
@@ -283,8 +321,9 @@ func (p *redisProcess) Close() error {
}
var (
- redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
- redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+ redisSentinelConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "sentinel.conf"))
)
func redisDir(port string) (string, error) {
@@ -306,11 +345,12 @@ func startRedis(port string, args ...string) (*redisProcess, error) {
if err != nil {
return nil, err
}
- if err = exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
+
+ if err := exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
return nil, err
}
- baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir}
+ baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir, "--enable-module-command", "yes"}
process, err := execCmd(redisServerBin, append(baseArgs, args...)...)
if err != nil {
return nil, err
@@ -324,7 +364,7 @@ func startRedis(port string, args ...string) (*redisProcess, error) {
p := &redisProcess{process, client}
registerProcess(port, p)
- return p, err
+ return p, nil
}
func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
@@ -333,7 +373,12 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
return nil, err
}
- process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
+ sentinelConf := filepath.Join(dir, "sentinel.conf")
+ if err := os.WriteFile(sentinelConf, nil, 0o644); err != nil {
+ return nil, err
+ }
+
+ process, err := execCmd(redisServerBin, sentinelConf, "--sentinel", "--port", port, "--dir", dir)
if err != nil {
return nil, err
}
@@ -355,7 +400,7 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
client.Process(ctx, cmd)
if err := cmd.Err(); err != nil {
process.Kill()
- return nil, err
+ return nil, fmt.Errorf("%s failed: %w", cmd, err)
}
}
@@ -412,37 +457,28 @@ func (cn *badConn) Write([]byte) (int, error) {
//------------------------------------------------------------------------------
type hook struct {
- beforeProcess func(ctx context.Context, cmd redis.Cmder) (context.Context, error)
- afterProcess func(ctx context.Context, cmd redis.Cmder) error
-
- beforeProcessPipeline func(ctx context.Context, cmds []redis.Cmder) (context.Context, error)
- afterProcessPipeline func(ctx context.Context, cmds []redis.Cmder) error
+ dialHook func(hook redis.DialHook) redis.DialHook
+ processHook func(hook redis.ProcessHook) redis.ProcessHook
+ processPipelineHook func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook
}
-func (h *hook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- if h.beforeProcess != nil {
- return h.beforeProcess(ctx, cmd)
+func (h *hook) DialHook(hook redis.DialHook) redis.DialHook {
+ if h.dialHook != nil {
+ return h.dialHook(hook)
}
- return ctx, nil
+ return hook
}
-func (h *hook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
- if h.afterProcess != nil {
- return h.afterProcess(ctx, cmd)
+func (h *hook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ if h.processHook != nil {
+ return h.processHook(hook)
}
- return nil
+ return hook
}
-func (h *hook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- if h.beforeProcessPipeline != nil {
- return h.beforeProcessPipeline(ctx, cmds)
+func (h *hook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ if h.processPipelineHook != nil {
+ return h.processPipelineHook(hook)
}
- return ctx, nil
-}
-
-func (h *hook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
- if h.afterProcessPipeline != nil {
- return h.afterProcessPipeline(ctx, cmds)
- }
- return nil
+ return hook
}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go
new file mode 100644
index 0000000..1bc82ec
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/monitor_test.go
@@ -0,0 +1,48 @@
+package redis_test
+
+import (
+ "context"
+ "time"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var _ = Describe("Monitor command", Label("monitor"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should monitor", Label("monitor"), func() {
+ ress := make(chan string)
+ client1 := redis.NewClient(&redis.Options{Addr: rediStackAddr})
+ mn := client1.Monitor(ctx, ress)
+ mn.Start()
+ // Wait for the Redis server to be in monitoring mode.
+ time.Sleep(100 * time.Millisecond)
+ client.Set(ctx, "foo", "bar", 0)
+ client.Set(ctx, "bar", "baz", 0)
+ client.Set(ctx, "bap", 8, 0)
+ client.Get(ctx, "bap")
+ lst := []string{}
+ for i := 0; i < 5; i++ {
+ s := <-ress
+ lst = append(lst, s)
+ }
+ mn.Stop()
+ Expect(lst[0]).To(ContainSubstring("OK"))
+ Expect(lst[1]).To(ContainSubstring(`"set" "foo" "bar"`))
+ Expect(lst[2]).To(ContainSubstring(`"set" "bar" "baz"`))
+ Expect(lst[3]).To(ContainSubstring(`"set" "bap" "8"`))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options.go
index a4abe32..dff52ae 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options.go
@@ -13,7 +13,7 @@ import (
"strings"
"time"
- "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/redis/go-redis/v9/internal/pool"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@@ -27,7 +27,7 @@ type Limiter interface {
ReportResult(result error)
}
-// Options keeps the settings to setup redis connection.
+// Options keeps the settings to set up redis connection.
type Options struct {
// The network type, either tcp or unix.
// Default is tcp.
@@ -35,6 +35,9 @@ type Options struct {
// host:port address.
Addr string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// Dialer creates new network connection and has priority over
// Network and Addr options.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
@@ -42,6 +45,9 @@ type Options struct {
// Hook that is called when new connection is established.
OnConnect func(ctx context.Context, cn *Conn) error
+ // Protocol 2 or 3. Use the version to negotiate RESP version with redis-server.
+ // Default is 3.
+ Protocol int
// Use the specified Username to authenticate the current connection
// with one of the connections defined in the ACL list when connecting
// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
@@ -51,6 +57,9 @@ type Options struct {
// or the User Password when connecting to a Redis 6.0 instance, or greater,
// that is using the Redis ACL system.
Password string
+ // CredentialsProvider allows the username and password to be updated
+ // before reconnecting. It should return the current username and password.
+ CredentialsProvider func() (username string, password string)
// Database to be selected after connecting to the server.
DB int
@@ -69,49 +78,75 @@ type Options struct {
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads. If reached, commands will fail
- // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
- // Default is 3 seconds.
+ // with a timeout instead of blocking. Supported values:
+ // - `0` - default timeout (3 seconds).
+ // - `-1` - no timeout (block indefinitely).
+ // - `-2` - disables SetReadDeadline calls completely.
ReadTimeout time.Duration
// Timeout for socket writes. If reached, commands will fail
- // with a timeout instead of blocking.
- // Default is ReadTimeout.
+ // with a timeout instead of blocking. Supported values:
+ // - `0` - default timeout (3 seconds).
+ // - `-1` - no timeout (block indefinitely).
+ // - `-2` - disables SetWriteDeadline calls completely.
WriteTimeout time.Duration
+ // ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines.
+ // See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts
+ ContextTimeoutEnabled bool
// Type of connection pool.
// true for FIFO pool, false for LIFO pool.
- // Note that fifo has higher overhead compared to lifo.
+ // Note that FIFO has slightly higher overhead compared to LIFO,
+ // but it helps closing idle connections faster reducing the pool size.
PoolFIFO bool
- // Maximum number of socket connections.
+ // Base number of socket connections.
// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
+ // If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize,
+ // you can limit it through MaxActiveConns
PoolSize int
- // Minimum number of idle connections which is useful when establishing
- // new connection is slow.
- MinIdleConns int
- // Connection age at which client retires (closes) the connection.
- // Default is to not close aged connections.
- MaxConnAge time.Duration
// Amount of time client waits for connection if all connections
// are busy before returning an error.
// Default is ReadTimeout + 1 second.
PoolTimeout time.Duration
- // Amount of time after which client closes idle connections.
+ // Minimum number of idle connections which is useful when establishing
+ // new connection is slow.
+ // Default is 0. the idle connections are not closed by default.
+ MinIdleConns int
+ // Maximum number of idle connections.
+ // Default is 0. the idle connections are not closed by default.
+ MaxIdleConns int
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActiveConns int
+ // ConnMaxIdleTime is the maximum amount of time a connection may be idle.
// Should be less than server's timeout.
- // Default is 5 minutes. -1 disables idle timeout check.
- IdleTimeout time.Duration
- // Frequency of idle checks made by idle connections reaper.
- // Default is 1 minute. -1 disables idle connections reaper,
- // but idle connections are still discarded by the client
- // if IdleTimeout is set.
- IdleCheckFrequency time.Duration
-
- // Enables read only queries on slave nodes.
- readOnly bool
-
- // TLS Config to use. When set TLS will be negotiated.
+ //
+ // Expired connections may be closed lazily before reuse.
+ // If d <= 0, connections are not closed due to a connection's idle time.
+ //
+ // Default is 30 minutes. -1 disables idle timeout check.
+ ConnMaxIdleTime time.Duration
+ // ConnMaxLifetime is the maximum amount of time a connection may be reused.
+ //
+ // Expired connections may be closed lazily before reuse.
+ // If <= 0, connections are not closed due to a connection's age.
+ //
+ // Default is to not close idle connections.
+ ConnMaxLifetime time.Duration
+
+ // TLS Config to use. When set, TLS will be negotiated.
TLSConfig *tls.Config
- // Limiter interface used to implemented circuit breaker or rate limiter.
+ // Limiter interface used to implement circuit breaker or rate limiter.
Limiter Limiter
+
+ // Enables read only queries on slave/follower nodes.
+ readOnly bool
+
+ // Disable set-lib on connect. Default is false.
+ DisableIndentity bool
+
+ // Add suffix to client name. Default is empty.
+ IdentitySuffix string
}
func (opt *Options) init() {
@@ -129,40 +164,36 @@ func (opt *Options) init() {
opt.DialTimeout = 5 * time.Second
}
if opt.Dialer == nil {
- opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
- netDialer := &net.Dialer{
- Timeout: opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- if opt.TLSConfig == nil {
- return netDialer.DialContext(ctx, network, addr)
- }
- return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
- }
+ opt.Dialer = NewDialer(opt)
}
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
+ case -2:
+ opt.ReadTimeout = -1
case -1:
opt.ReadTimeout = 0
case 0:
opt.ReadTimeout = 3 * time.Second
}
switch opt.WriteTimeout {
+ case -2:
+ opt.WriteTimeout = -1
case -1:
opt.WriteTimeout = 0
case 0:
opt.WriteTimeout = opt.ReadTimeout
}
if opt.PoolTimeout == 0 {
- opt.PoolTimeout = opt.ReadTimeout + time.Second
- }
- if opt.IdleTimeout == 0 {
- opt.IdleTimeout = 5 * time.Minute
+ if opt.ReadTimeout > 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ } else {
+ opt.PoolTimeout = 30 * time.Second
+ }
}
- if opt.IdleCheckFrequency == 0 {
- opt.IdleCheckFrequency = time.Minute
+ if opt.ConnMaxIdleTime == 0 {
+ opt.ConnMaxIdleTime = 30 * time.Minute
}
if opt.MaxRetries == -1 {
@@ -189,36 +220,57 @@ func (opt *Options) clone() *Options {
return &clone
}
+// NewDialer returns a function that will be used as the default dialer
+// when none is specified in Options.Dialer.
+func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+}
+
// ParseURL parses an URL into Options that can be used to connect to Redis.
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
-// redis://<user>:<password>@<host>:<port>/<db_number>
+//
+// redis://<user>:<password>@<host>:<port>/<db_number>
+//
// Unix connection:
-// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
+// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
// Most Option fields can be set using query parameters, with the following restrictions:
-// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
-// - only scalar type fields are supported (bool, int, time.Duration)
-// - for time.Duration fields, values must be a valid input for time.ParseDuration();
-// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
-// - to disable a duration field, use value less than or equal to 0; to use the default
-// value, leave the value blank or remove the parameter
-// - only the last value is interpreted if a parameter is given multiple times
-// - fields "network", "addr", "username" and "password" can only be set using other
-// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
-// names will be treated as unknown parameters
-// - unknown parameter names will result in an error
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
// Examples:
-// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
-// is equivalent to:
-// &Options{
-// Network: "tcp",
-// Addr: "localhost:6789",
-// DB: 1, // path "/3" was overridden by "&db=1"
-// DialTimeout: 3 * time.Second, // no time unit = seconds
-// ReadTimeout: 6 * time.Second,
-// MaxRetries: 2,
-// }
+//
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
func ParseURL(redisURL string) (*Options, error) {
u, err := url.Parse(redisURL)
if err != nil {
@@ -240,16 +292,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
o.Username, o.Password = getUserPassword(u)
- h, p, err := net.SplitHostPort(u.Host)
- if err != nil {
- h = u.Host
- }
- if h == "" {
- h = "localhost"
- }
- if p == "" {
- p = "6379"
- }
+ h, p := getHostPortWithDefaults(u)
o.Addr = net.JoinHostPort(h, p)
f := strings.FieldsFunc(u.Path, func(r rune) bool {
@@ -259,6 +302,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
case 0:
o.DB = 0
case 1:
+ var err error
if o.DB, err = strconv.Atoi(f[0]); err != nil {
return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
}
@@ -267,12 +311,32 @@ func setupTCPConn(u *url.URL) (*Options, error) {
}
if u.Scheme == "rediss" {
- o.TLSConfig = &tls.Config{ServerName: h}
+ o.TLSConfig = &tls.Config{
+ ServerName: h,
+ MinVersion: tls.VersionTLS12,
+ }
}
return setupConnParams(u, o)
}
+// getHostPortWithDefaults is a helper function that splits the url into
+// a host and a port. If the host is missing, it defaults to localhost
+// and if the port is missing, it defaults to 6379.
+func getHostPortWithDefaults(u *url.URL) (string, string) {
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ if port == "" {
+ port = "6379"
+ }
+ return host, port
+}
+
func setupUnixConn(u *url.URL) (*Options, error) {
o := &Options{
Network: "unix",
@@ -291,6 +355,10 @@ type queryOptions struct {
err error
}
+func (o *queryOptions) has(name string) bool {
+ return len(o.q[name]) > 0
+}
+
func (o *queryOptions) string(name string) string {
vs := o.q[name]
if len(vs) == 0 {
@@ -300,6 +368,12 @@ func (o *queryOptions) string(name string) string {
return vs[len(vs)-1]
}
+func (o *queryOptions) strings(name string) []string {
+ vs := o.q[name]
+ delete(o.q, name)
+ return vs
+}
+
func (o *queryOptions) int(name string) int {
s := o.string(name)
if s == "" {
@@ -377,6 +451,8 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) {
o.DB = db
}
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
o.MaxRetries = q.int("max_retries")
o.MinRetryBackoff = q.duration("min_retry_backoff")
o.MaxRetryBackoff = q.duration("max_retry_backoff")
@@ -385,11 +461,20 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) {
o.WriteTimeout = q.duration("write_timeout")
o.PoolFIFO = q.bool("pool_fifo")
o.PoolSize = q.int("pool_size")
- o.MinIdleConns = q.int("min_idle_conns")
- o.MaxConnAge = q.duration("max_conn_age")
o.PoolTimeout = q.duration("pool_timeout")
- o.IdleTimeout = q.duration("idle_timeout")
- o.IdleCheckFrequency = q.duration("idle_check_frequency")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ if q.has("conn_max_idle_time") {
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+ } else {
+ o.ConnMaxIdleTime = q.duration("idle_timeout")
+ }
+ if q.has("conn_max_lifetime") {
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ } else {
+ o.ConnMaxLifetime = q.duration("max_conn_age")
+ }
if q.err != nil {
return nil, q.err
}
@@ -413,17 +498,21 @@ func getUserPassword(u *url.URL) (string, string) {
return user, password
}
-func newConnPool(opt *Options) *pool.ConnPool {
+func newConnPool(
+ opt *Options,
+ dialer func(ctx context.Context, network, addr string) (net.Conn, error),
+) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
- return opt.Dialer(ctx, opt.Network, opt.Addr)
+ return dialer(ctx, opt.Network, opt.Addr)
},
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
})
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options_test.go
index 1450523..1db36fd 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/options_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/options_test.go
@@ -1,5 +1,4 @@
//go:build go1.7
-// +build go1.7
package redis
@@ -46,19 +45,25 @@ func TestParseURL(t *testing.T) {
o: &Options{Addr: "localhost:123", DB: 2, ReadTimeout: 2 * time.Second, PoolFIFO: true},
}, {
// special case handling for disabled timeouts
- url: "redis://localhost:123/?db=2&idle_timeout=0",
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: -1},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time=0",
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: -1},
}, {
// negative values disable timeouts as well
- url: "redis://localhost:123/?db=2&idle_timeout=-1",
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: -1},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time=-1",
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: -1},
}, {
// absent timeout values will use defaults
- url: "redis://localhost:123/?db=2&idle_timeout=",
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: 0},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time=",
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: 0},
}, {
- url: "redis://localhost:123/?db=2&idle_timeout", // missing "=" at the end
- o: &Options{Addr: "localhost:123", DB: 2, IdleTimeout: 0},
+ url: "redis://localhost:123/?db=2&conn_max_idle_time", // missing "=" at the end
+ o: &Options{Addr: "localhost:123", DB: 2, ConnMaxIdleTime: 0},
+ }, {
+ url: "redis://localhost:123/?db=2&client_name=hi", // client name
+ o: &Options{Addr: "localhost:123", DB: 2, ClientName: "hi"},
+ }, {
+ url: "redis://localhost:123/?db=2&protocol=2", // RESP Protocol
+ o: &Options{Addr: "localhost:123", DB: 2, Protocol: 2},
}, {
url: "unix:///tmp/redis.sock",
o: &Options{Addr: "/tmp/redis.sock"},
@@ -174,20 +179,20 @@ func comprareOptions(t *testing.T, actual, expected *Options) {
if actual.PoolSize != expected.PoolSize {
t.Errorf("PoolSize: got %v, expected %v", actual.PoolSize, expected.PoolSize)
}
+ if actual.PoolTimeout != expected.PoolTimeout {
+ t.Errorf("PoolTimeout: got %v, expected %v", actual.PoolTimeout, expected.PoolTimeout)
+ }
if actual.MinIdleConns != expected.MinIdleConns {
t.Errorf("MinIdleConns: got %v, expected %v", actual.MinIdleConns, expected.MinIdleConns)
}
- if actual.MaxConnAge != expected.MaxConnAge {
- t.Errorf("MaxConnAge: got %v, expected %v", actual.MaxConnAge, expected.MaxConnAge)
- }
- if actual.PoolTimeout != expected.PoolTimeout {
- t.Errorf("PoolTimeout: got %v, expected %v", actual.PoolTimeout, expected.PoolTimeout)
+ if actual.MaxIdleConns != expected.MaxIdleConns {
+ t.Errorf("MaxIdleConns: got %v, expected %v", actual.MaxIdleConns, expected.MaxIdleConns)
}
- if actual.IdleTimeout != expected.IdleTimeout {
- t.Errorf("IdleTimeout: got %v, expected %v", actual.IdleTimeout, expected.IdleTimeout)
+ if actual.ConnMaxIdleTime != expected.ConnMaxIdleTime {
+ t.Errorf("ConnMaxIdleTime: got %v, expected %v", actual.ConnMaxIdleTime, expected.ConnMaxIdleTime)
}
- if actual.IdleCheckFrequency != expected.IdleCheckFrequency {
- t.Errorf("IdleCheckFrequency: got %v, expected %v", actual.IdleCheckFrequency, expected.IdleCheckFrequency)
+ if actual.ConnMaxLifetime != expected.ConnMaxLifetime {
+ t.Errorf("ConnMaxLifetime: got %v, expected %v", actual.ConnMaxLifetime, expected.ConnMaxLifetime)
}
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster.go
index a54f2f3..17f98d9 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster.go
@@ -6,17 +6,19 @@ import (
"fmt"
"math"
"net"
+ "net/url"
"runtime"
"sort"
+ "strings"
"sync"
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/rand"
)
var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
@@ -27,6 +29,9 @@ type ClusterOptions struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// NewClient creates a cluster node client with provided name and options.
NewClient func(opt *Options) *Client
@@ -57,29 +62,33 @@ type ClusterOptions struct {
OnConnect func(ctx context.Context, cn *Conn) error
- Username string
- Password string
+ Protocol int
+ Username string
+ Password string
+ CredentialsProvider func() (username string, password string)
MaxRetries int
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
+ PoolFIFO bool
+ PoolSize int // applies per cluster node and not for the whole cluster
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int // applies per cluster node and not for the whole cluster
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
- // PoolSize applies per cluster node and not for the whole cluster.
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ TLSConfig *tls.Config
+ DisableIndentity bool // Disable set-lib on connect. Default is false.
- TLSConfig *tls.Config
+ IdentitySuffix string // Add suffix to client name. Default is empty.
}
func (opt *ClusterOptions) init() {
@@ -131,33 +140,163 @@ func (opt *ClusterOptions) init() {
}
}
-func (opt *ClusterOptions) clientOptions() *Options {
- const disableIdleCheck = -1
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+// redis://<user>:<password>@<host>:<port>
+// or
+// rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the query parameter, "addr" one or more times. e.g:
+//
+// redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+// or
+// rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is intepreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &ClusterOptions{
+// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"]
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+ o := &ClusterOptions{}
+
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // add base URL to the array of addresses
+ // more addresses may be added through the URL params
+ h, p := getHostPortWithDefaults(u)
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+ // setup username, password, and other configurations
+ o, err = setupClusterConn(u, h, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: host}
+ fallthrough
+ case "redis":
+ o.Username, o.Password = getUserPassword(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ // retrieve the configuration from the query parameters
+ o, err := setupClusterQueryParams(u, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterQueryParams converts query parameters in u to option value in o.
+func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRedirects = q.int("max_redirects")
+ o.ReadOnly = q.bool("read_only")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // addr can be specified as many times as needed
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
- Username: opt.Username,
- Password: opt.Password,
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ CredentialsProvider: opt.CredentialsProvider,
MaxRetries: opt.MaxRetries,
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: disableIdleCheck,
-
- TLSConfig: opt.TLSConfig,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ TLSConfig: opt.TLSConfig,
// If ClusterSlots is populated, then we probably have an artificial
// cluster whose nodes are not in clustering mode (otherwise there isn't
// much use for ClusterSlots config). This means we cannot execute the
@@ -204,15 +343,26 @@ func (n *clusterNode) updateLatency() {
const numProbe = 10
var dur uint64
+ successes := 0
for i := 0; i < numProbe; i++ {
time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
start := time.Now()
- n.Client.Ping(context.TODO())
- dur += uint64(time.Since(start) / time.Microsecond)
+ err := n.Client.Ping(context.TODO()).Err()
+ if err == nil {
+ dur += uint64(time.Since(start) / time.Microsecond)
+ successes++
+ }
}
- latency := float64(dur) / float64(numProbe)
+ var latency float64
+ if successes == 0 {
+ // If none of the pings worked, set latency to some arbitrarily high value so this node gets
+ // least priority.
+ latency = float64((1 * time.Minute) / time.Microsecond)
+ } else {
+ latency = float64(dur) / float64(successes)
+ }
atomic.StoreUint32(&n.latency, uint32(latency+0.5))
}
@@ -262,6 +412,7 @@ type clusterNodes struct {
nodes map[string]*clusterNode
activeAddrs []string
closed bool
+ onNewNode []func(rdb *Client)
_generation uint32 // atomic
}
@@ -297,6 +448,12 @@ func (c *clusterNodes) Close() error {
return firstErr
}
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
func (c *clusterNodes) Addrs() ([]string, error) {
var addrs []string
@@ -374,6 +531,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
}
node = newClusterNode(c.opt, addr)
+ for _, fn := range c.onNewNode {
+ fn(node.Client)
+ }
c.addrs = appendIfNotExists(c.addrs, addr)
c.nodes[addr] = node
@@ -683,21 +843,16 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er
//------------------------------------------------------------------------------
-type clusterClient struct {
- opt *ClusterOptions
- nodes *clusterNodes
- state *clusterStateHolder //nolint:structcheck
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
// ClusterClient is a Redis Cluster client representing a pool of zero
// or more underlying connections. It's safe for concurrent use by
// multiple goroutines.
type ClusterClient struct {
- *clusterClient
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
cmdable
- hooks
- ctx context.Context
+ hooksMixin
}
// NewClusterClient returns a Redis Cluster client as described in
@@ -706,38 +861,24 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
opt.init()
c := &ClusterClient{
- clusterClient: &clusterClient{
- opt: opt,
- nodes: newClusterNodes(opt),
- },
- ctx: context.Background(),
+ opt: opt,
+ nodes: newClusterNodes(opt),
}
+
c.state = newClusterStateHolder(c.loadState)
c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
c.cmdable = c.Process
- if opt.IdleCheckFrequency > 0 {
- go c.reaper(opt.IdleCheckFrequency)
- }
+ c.initHooks(hooks{
+ dial: nil,
+ process: c.process,
+ pipeline: c.processPipeline,
+ txPipeline: c.processTxPipeline,
+ })
return c
}
-func (c *ClusterClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
// Options returns read-only Options that were used to create the client.
func (c *ClusterClient) Options() *ClusterOptions {
return c.opt
@@ -757,7 +898,7 @@ func (c *ClusterClient) Close() error {
return c.nodes.Close()
}
-// Do creates a Cmd from the args and processes the cmd.
+// Do create a Cmd from the args and processes the cmd.
func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@@ -765,13 +906,13 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
- cmdInfo := c.cmdInfo(cmd.Name())
- slot := c.cmdSlot(cmd)
-
+ slot := c.cmdSlot(ctx, cmd)
var node *clusterNode
var ask bool
var lastErr error
@@ -784,19 +925,19 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
if node == nil {
var err error
- node, err = c.cmdNode(ctx, cmdInfo, slot)
+ node, err = c.cmdNode(ctx, cmd.Name(), slot)
if err != nil {
return err
}
}
if ask {
+ ask = false
+
pipe := node.Client.Pipeline()
_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
_ = pipe.Process(ctx, cmd)
_, lastErr = pipe.Exec(ctx)
- _ = pipe.Close()
- ask = false
} else {
lastErr = node.Client.Process(ctx, cmd)
}
@@ -851,6 +992,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
return lastErr
}
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+ c.nodes.OnNewNode(fn)
+}
+
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachMaster(
@@ -1056,30 +1201,9 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
return nil, firstErr
}
-// reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
- ticker := time.NewTicker(idleCheckFrequency)
- defer ticker.Stop()
-
- for range ticker.C {
- nodes, err := c.nodes.All()
- if err != nil {
- break
- }
-
- for _, node := range nodes {
- _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
- }
- }
- }
-}
-
func (c *ClusterClient) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
+ exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
@@ -1090,13 +1214,9 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error)
}
func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
-}
-
-func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
cmdsMap := newCmdsMap()
- err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
- if err != nil {
+
+ if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
setCmdsErr(cmds, err)
return err
}
@@ -1116,18 +1236,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
-
- err := c._processPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
+ c.processPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@@ -1147,9 +1256,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return err
}
- if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
node, err := c.slotReadOnlyNode(state, slot)
if err != nil {
return err
@@ -1160,7 +1269,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
}
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
node, err := state.slotMasterNode(slot)
if err != nil {
return err
@@ -1170,9 +1279,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
return nil
}
-func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(cmd.Name())
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
if cmdInfo == nil || !cmdInfo.ReadOnly {
return false
}
@@ -1180,22 +1289,42 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
return true
}
-func (c *ClusterClient) _processPipelineNode(
+func (c *ClusterClient) processPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
- return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
- })
- })
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
})
}
@@ -1206,7 +1335,7 @@ func (c *ClusterClient) pipelineReadCmds(
cmds []Cmder,
failedCmds *cmdsMap,
) error {
- for _, cmd := range cmds {
+ for i, cmd := range cmds {
err := cmd.readReply(rd)
cmd.SetErr(err)
@@ -1218,15 +1347,24 @@ func (c *ClusterClient) pipelineReadCmds(
continue
}
- if c.opt.ReadOnly && isLoadingError(err) {
+ if c.opt.ReadOnly {
node.MarkAsFailing()
- return err
}
- if isRedisError(err) {
- continue
+
+ if !isRedisError(err) {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds[i+1:], err)
+ return err
}
+ }
+
+ if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
return err
}
+
return nil
}
@@ -1260,8 +1398,10 @@ func (c *ClusterClient) checkMovedErr(
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *ClusterClient) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
}
pipe.init()
return &pipe
@@ -1272,10 +1412,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
-}
-
-func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
// Trim multi .. exec.
cmds = cmds[1 : len(cmds)-1]
@@ -1285,7 +1421,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return err
}
- cmdsMap := c.mapCmdsBySlot(cmds)
+ cmdsMap := c.mapCmdsBySlot(ctx, cmds)
for slot, cmds := range cmdsMap {
node, err := state.slotMasterNode(slot)
if err != nil {
@@ -1309,19 +1445,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
wg.Add(1)
go func(node *clusterNode, cmds []Cmder) {
defer wg.Done()
-
- err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
-
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
+ c.processTxPipelineNode(ctx, node, cmds, failedCmds)
}(node, cmds)
}
@@ -1336,44 +1460,69 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
return cmdsFirstErr(cmds)
}
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
cmdsMap := make(map[int][]Cmder)
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
+ slot := c.cmdSlot(ctx, cmd)
cmdsMap[slot] = append(cmdsMap[slot], cmd)
}
return cmdsMap
}
-func (c *ClusterClient) _processTxPipelineNode(
+func (c *ClusterClient) processTxPipelineNode(
ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processTxPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
) error {
- return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := c.txPipelineReadQueued(
+ ctx, rd, statusCmd, trimmedCmds, failedCmds,
+ ); err != nil {
+ setCmdsErr(cmds, err)
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
}
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
- if err != nil {
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
- }
- return err
- }
+ return err
+ }
- return pipelineReadCmds(rd, cmds)
- })
- })
+ return pipelineReadCmds(rd, trimmedCmds)
})
}
@@ -1406,12 +1555,7 @@ func (c *ClusterClient) txPipelineReadQueued(
return err
}
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
+ if line[0] != proto.RespArray {
return fmt.Errorf("redis: expected '*', but got line %q", line)
}
@@ -1568,6 +1712,15 @@ func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *Pub
return pubsub
}
+// SSubscribe Subscribes the client to the specified shard channels.
+func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
@@ -1614,27 +1767,27 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo,
return nil, firstErr
}
-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
if err != nil {
+ internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
return nil
}
info := cmdsInfo[name]
if info == nil {
- internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
}
return info
}
-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
args := cmd.Args()
if args[0] == "cluster" && args[1] == "getkeysinslot" {
return args[2].(int)
}
- cmdInfo := c.cmdInfo(cmd.Name())
- return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd))
}
func cmdSlot(cmd Cmder, pos int) int {
@@ -1647,7 +1800,7 @@ func cmdSlot(cmd Cmder, pos int) int {
func (c *ClusterClient) cmdNode(
ctx context.Context,
- cmdInfo *CommandInfo,
+ cmdName string,
slot int,
) (*clusterNode, error) {
state, err := c.state.Get(ctx)
@@ -1655,13 +1808,16 @@ func (c *ClusterClient) cmdNode(
return nil, err
}
- if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
- return c.slotReadOnlyNode(state, slot)
+ if c.opt.ReadOnly {
+ cmdInfo := c.cmdInfo(ctx, cmdName)
+ if cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
}
return state.slotMasterNode(slot)
}
-func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
if c.opt.RouteByLatency {
return state.slotClosestNode(slot)
}
@@ -1708,6 +1864,13 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client,
return node.Client, err
}
+func (c *ClusterClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_commands.go
index 085bce8..b13f8e7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_commands.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_commands.go
@@ -8,7 +8,7 @@ import (
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd := NewIntCmd(ctx, "dbsize")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var size int64
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
n, err := master.DBSize(ctx).Result()
@@ -30,8 +30,8 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptLoad(ctx, script).Result()
if err != nil {
@@ -56,7 +56,7 @@ func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCm
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
return shard.ScriptFlush(ctx).Err()
})
@@ -82,8 +82,8 @@ func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *Boo
result[i] = true
}
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptExists(ctx, hashes...).Result()
if err != nil {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_test.go
index 6ee7364..3d2f807 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/cluster_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/osscluster_test.go
@@ -2,6 +2,8 @@ package redis_test
import (
"context"
+ "crypto/tls"
+ "errors"
"fmt"
"net"
"strconv"
@@ -9,11 +11,11 @@ import (
"sync"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
- "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/v9/internal/hashtag"
)
type clusterScenario struct {
@@ -82,8 +84,10 @@ func (s *clusterScenario) newClusterClient(
func (s *clusterScenario) Close() error {
for _, port := range s.ports {
- processes[port].Close()
- delete(processes, port)
+ if process, ok := processes[port]; ok {
+ process.Close()
+ delete(processes, port)
+ }
}
return nil
}
@@ -237,14 +241,6 @@ var _ = Describe("ClusterClient", func() {
var client *redis.ClusterClient
assertClusterClient := func() {
- It("supports WithContext", func() {
- ctx, cancel := context.WithCancel(ctx)
- cancel()
-
- err := client.Ping(ctx).Err()
- Expect(err).To(MatchError("context canceled"))
- })
-
It("should GET/SET/DEL", func() {
err := client.Get(ctx, "A").Err()
Expect(err).To(Equal(redis.Nil))
@@ -515,9 +511,7 @@ var _ = Describe("ClusterClient", func() {
pipe = client.Pipeline().(*redis.Pipeline)
})
- AfterEach(func() {
- Expect(pipe.Close()).NotTo(HaveOccurred())
- })
+ AfterEach(func() {})
assertPipeline()
})
@@ -527,9 +521,7 @@ var _ = Describe("ClusterClient", func() {
pipe = client.TxPipeline().(*redis.Pipeline)
})
- AfterEach(func() {
- Expect(pipe.Close()).NotTo(HaveOccurred())
- })
+ AfterEach(func() {})
assertPipeline()
})
@@ -559,6 +551,30 @@ var _ = Describe("ClusterClient", func() {
}, 30*time.Second).ShouldNot(HaveOccurred())
})
+ It("supports sharded PubSub", func() {
+ pubsub := client.SSubscribe(ctx, "mychannel")
+ defer pubsub.Close()
+
+ Eventually(func() error {
+ _, err := client.SPublish(ctx, "mychannel", "hello").Result()
+ if err != nil {
+ return err
+ }
+
+ msg, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ if err != nil {
+ return err
+ }
+
+ _, ok := msg.(*redis.Message)
+ if !ok {
+ return fmt.Errorf("got %T, wanted *redis.Message", msg)
+ }
+
+ return nil
+ }, 30*time.Second).ShouldNot(HaveOccurred())
+ })
+
It("supports PubSub.Ping without channels", func() {
pubsub := client.Subscribe(ctx)
defer pubsub.Close()
@@ -568,9 +584,39 @@ var _ = Describe("ClusterClient", func() {
})
}
+ Describe("ClusterClient PROTO 2", func() {
+ BeforeEach(func() {
+ opt = redisClusterOptions()
+ opt.Protocol = 2
+ client = cluster.newClusterClient(ctx, opt)
+
+ err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+ return master.FlushDB(ctx).Err()
+ })
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should CLUSTER PROTO 2", func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ return nil
+ })
+ })
+ })
+
Describe("ClusterClient", func() {
BeforeEach(func() {
opt = redisClusterOptions()
+ opt.ClientName = "cluster_hi"
client = cluster.newClusterClient(ctx, opt)
err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
@@ -661,6 +707,90 @@ var _ = Describe("ClusterClient", func() {
Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred())
})
+ It("should CLUSTER SHARDS", func() {
+ res, err := client.ClusterShards(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).NotTo(BeEmpty())
+
+ // Iterate over the ClusterShard results and validate the fields.
+ for _, shard := range res {
+ Expect(shard.Slots).NotTo(BeEmpty())
+ for _, slotRange := range shard.Slots {
+ Expect(slotRange.Start).To(BeNumerically(">=", 0))
+ Expect(slotRange.End).To(BeNumerically(">=", slotRange.Start))
+ }
+
+ Expect(shard.Nodes).NotTo(BeEmpty())
+ for _, node := range shard.Nodes {
+ Expect(node.ID).NotTo(BeEmpty())
+ Expect(node.Endpoint).NotTo(BeEmpty())
+ Expect(node.IP).NotTo(BeEmpty())
+ Expect(node.Port).To(BeNumerically(">", 0))
+
+ validRoles := []string{"master", "slave", "replica"}
+ Expect(validRoles).To(ContainElement(node.Role))
+
+ Expect(node.ReplicationOffset).To(BeNumerically(">=", 0))
+
+ validHealthStatuses := []string{"online", "failed", "loading"}
+ Expect(validHealthStatuses).To(ContainElement(node.Health))
+ }
+ }
+ })
+
+ It("should CLUSTER LINKS", func() {
+ res, err := client.ClusterLinks(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).NotTo(BeEmpty())
+
+ // Iterate over the ClusterLink results and validate the map keys.
+ for _, link := range res {
+
+ Expect(link.Direction).NotTo(BeEmpty())
+ Expect([]string{"from", "to"}).To(ContainElement(link.Direction))
+ Expect(link.Node).NotTo(BeEmpty())
+ Expect(link.CreateTime).To(BeNumerically(">", 0))
+
+ Expect(link.Events).NotTo(BeEmpty())
+ validEventChars := []rune{'r', 'w'}
+ for _, eventChar := range link.Events {
+ Expect(validEventChars).To(ContainElement(eventChar))
+ }
+
+ Expect(link.SendBufferAllocated).To(BeNumerically(">=", 0))
+ Expect(link.SendBufferUsed).To(BeNumerically(">=", 0))
+ }
+ })
+
+ It("should cluster client setname", func() {
+ err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ return c.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=cluster_hi"))
+ return nil
+ })
+ })
+
+ It("should CLUSTER PROTO 3", func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ return nil
+ })
+ })
+
+ It("should CLUSTER MYSHARDID", func() {
+ shardID, err := client.ClusterMyShardID(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(shardID).ToNot(BeEmpty())
+ })
+
It("should CLUSTER NODES", func() {
res, err := client.ClusterNodes(ctx).Result()
Expect(err).NotTo(HaveOccurred())
@@ -737,6 +867,9 @@ var _ = Describe("ClusterClient", func() {
})
It("supports Process hook", func() {
+ testCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
err := client.Ping(ctx).Err()
Expect(err).NotTo(HaveOccurred())
@@ -748,29 +881,47 @@ var _ = Describe("ClusterClient", func() {
var stack []string
clusterHook := &hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "cluster.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "cluster.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ select {
+ case <-testCtx.Done():
+ return hook(ctx, cmd)
+ default:
+ }
+
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcess")
+
+ return err
+ }
},
}
client.AddHook(clusterHook)
nodeHook := &hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ select {
+ case <-testCtx.Done():
+ return hook(ctx, cmd)
+ default:
+ }
+
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcess")
+
+ return err
+ }
},
}
@@ -787,11 +938,6 @@ var _ = Describe("ClusterClient", func() {
"shard.AfterProcess",
"cluster.AfterProcess",
}))
-
- clusterHook.beforeProcess = nil
- clusterHook.afterProcess = nil
- nodeHook.beforeProcess = nil
- nodeHook.afterProcess = nil
})
It("supports Pipeline hook", func() {
@@ -806,33 +952,39 @@ var _ = Describe("ClusterClient", func() {
var stack []string
client.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "cluster.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "cluster.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcessPipeline")
+
+ return err
+ }
},
})
_ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
node.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
@@ -863,33 +1015,39 @@ var _ = Describe("ClusterClient", func() {
var stack []string
client.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: "))
- stack = append(stack, "cluster.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: PONG"))
- stack = append(stack, "cluster.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "cluster.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "cluster.AfterProcessPipeline")
+
+ return err
+ }
},
})
_ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
node.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(3))
- Expect(cmds[1].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
@@ -1182,16 +1340,17 @@ var _ = Describe("ClusterClient with unavailable Cluster", func() {
var client *redis.ClusterClient
BeforeEach(func() {
- for _, node := range cluster.clients {
- err := node.ClientPause(ctx, 5*time.Second).Err()
- Expect(err).NotTo(HaveOccurred())
- }
-
opt := redisClusterOptions()
opt.ReadTimeout = 250 * time.Millisecond
opt.WriteTimeout = 250 * time.Millisecond
opt.MaxRedirects = 1
client = cluster.newClusterClientUnstable(opt)
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+
+ for _, node := range cluster.clients {
+ err := node.ClientPause(ctx, 5*time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
})
AfterEach(func() {
@@ -1257,27 +1416,175 @@ var _ = Describe("ClusterClient timeout", func() {
Context("read/write timeout", func() {
BeforeEach(func() {
opt := redisClusterOptions()
- opt.ReadTimeout = 250 * time.Millisecond
- opt.WriteTimeout = 250 * time.Millisecond
- opt.MaxRedirects = 1
client = cluster.newClusterClient(ctx, opt)
err := client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
- return client.ClientPause(ctx, pause).Err()
+ err := client.ClientPause(ctx, pause).Err()
+
+ opt := client.Options()
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = time.Nanosecond
+
+ return err
})
Expect(err).NotTo(HaveOccurred())
+
+ // Overwrite timeouts after the client is initialized.
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = time.Nanosecond
+ opt.MaxRedirects = 0
})
AfterEach(func() {
_ = client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error {
defer GinkgoRecover()
+
+ opt := client.Options()
+ opt.ReadTimeout = time.Second
+ opt.WriteTimeout = time.Second
+
Eventually(func() error {
return client.Ping(ctx).Err()
}, 2*pause).ShouldNot(HaveOccurred())
return nil
})
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
})
testTimeout()
})
})
+
+var _ = Describe("ClusterClient ParseURL", func() {
+ cases := []struct {
+ test string
+ url string
+ o *redis.ClusterOptions // expected value
+ err error
+ }{
+ {
+ test: "ParseRedisURL",
+ url: "redis://localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}},
+ }, {
+ test: "ParseRedissURL",
+ url: "rediss://localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "MissingRedisPort",
+ url: "redis://localhost",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}},
+ }, {
+ test: "MissingRedissPort",
+ url: "rediss://localhost",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}, TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "MultipleRedisURLs",
+ url: "redis://localhost:123?addr=localhost:1234&addr=localhost:12345",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}},
+ }, {
+ test: "MultipleRedissURLs",
+ url: "rediss://localhost:123?addr=localhost:1234&addr=localhost:12345",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}, TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "OnlyPassword",
+ url: "redis://:bar@localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Password: "bar"},
+ }, {
+ test: "OnlyUser",
+ url: "redis://foo@localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo"},
+ }, {
+ test: "RedisUsernamePassword",
+ url: "redis://foo:bar@localhost:123",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo", Password: "bar"},
+ }, {
+ test: "RedissUsernamePassword",
+ url: "rediss://foo:bar@localhost:123?addr=localhost:1234",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, Username: "foo", Password: "bar", TLSConfig: &tls.Config{ServerName: "localhost"}},
+ }, {
+ test: "QueryParameters",
+ url: "redis://localhost:123?read_timeout=2&pool_fifo=true&addr=localhost:1234",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, ReadTimeout: 2 * time.Second, PoolFIFO: true},
+ }, {
+ test: "DisabledTimeout",
+ url: "redis://localhost:123?conn_max_idle_time=0",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1},
+ }, {
+ test: "DisabledTimeoutNeg",
+ url: "redis://localhost:123?conn_max_idle_time=-1",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1},
+ }, {
+ test: "UseDefault",
+ url: "redis://localhost:123?conn_max_idle_time=",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0},
+ }, {
+ test: "Protocol",
+ url: "redis://localhost:123?protocol=2",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Protocol: 2},
+ }, {
+ test: "ClientName",
+ url: "redis://localhost:123?client_name=cluster_hi",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ClientName: "cluster_hi"},
+ }, {
+ test: "UseDefaultMissing=",
+ url: "redis://localhost:123?conn_max_idle_time",
+ o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0},
+ }, {
+ test: "InvalidQueryAddr",
+ url: "rediss://foo:bar@localhost:123?addr=rediss://foo:barr@localhost:1234",
+ err: errors.New(`redis: unable to parse addr param: rediss://foo:barr@localhost:1234`),
+ }, {
+ test: "InvalidInt",
+ url: "redis://localhost?pool_size=five",
+ err: errors.New(`redis: invalid pool_size number: strconv.Atoi: parsing "five": invalid syntax`),
+ }, {
+ test: "InvalidBool",
+ url: "redis://localhost?pool_fifo=yes",
+ err: errors.New(`redis: invalid pool_fifo boolean: expected true/false/1/0 or an empty string, got "yes"`),
+ }, {
+ test: "UnknownParam",
+ url: "redis://localhost?abc=123",
+ err: errors.New("redis: unexpected option: abc"),
+ }, {
+ test: "InvalidScheme",
+ url: "https://google.com",
+ err: errors.New("redis: invalid URL scheme: https"),
+ },
+ }
+
+ It("match ParseClusterURL", func() {
+ for i := range cases {
+ tc := cases[i]
+ actual, err := redis.ParseClusterURL(tc.url)
+ if tc.err != nil {
+ Expect(err).Should(MatchError(tc.err))
+ } else {
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ if err == nil {
+ Expect(tc.o).NotTo(BeNil())
+
+ Expect(tc.o.Addrs).To(Equal(actual.Addrs))
+ Expect(tc.o.TLSConfig).To(Equal(actual.TLSConfig))
+ Expect(tc.o.Username).To(Equal(actual.Username))
+ Expect(tc.o.Password).To(Equal(actual.Password))
+ Expect(tc.o.MaxRetries).To(Equal(actual.MaxRetries))
+ Expect(tc.o.MinRetryBackoff).To(Equal(actual.MinRetryBackoff))
+ Expect(tc.o.MaxRetryBackoff).To(Equal(actual.MaxRetryBackoff))
+ Expect(tc.o.DialTimeout).To(Equal(actual.DialTimeout))
+ Expect(tc.o.ReadTimeout).To(Equal(actual.ReadTimeout))
+ Expect(tc.o.WriteTimeout).To(Equal(actual.WriteTimeout))
+ Expect(tc.o.PoolFIFO).To(Equal(actual.PoolFIFO))
+ Expect(tc.o.PoolSize).To(Equal(actual.PoolSize))
+ Expect(tc.o.MinIdleConns).To(Equal(actual.MinIdleConns))
+ Expect(tc.o.ConnMaxLifetime).To(Equal(actual.ConnMaxLifetime))
+ Expect(tc.o.ConnMaxIdleTime).To(Equal(actual.ConnMaxIdleTime))
+ Expect(tc.o.PoolTimeout).To(Equal(actual.PoolTimeout))
+ }
+ }
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline.go
index 31bab97..1c11420 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline.go
@@ -2,9 +2,7 @@ package redis
import (
"context"
- "sync"
-
- "github.com/go-redis/redis/v8/internal/pool"
+ "errors"
)
type pipelineExecer func(context.Context, []Cmder) error
@@ -13,7 +11,7 @@ type pipelineExecer func(context.Context, []Cmder) error
//
// Pipelining is a technique to extremely speed up processing by packing
// operations to batches, send them at once to Redis and read a replies in a
-// singe step.
+// single step.
// See https://redis.io/topics/pipelining
//
// Pay attention, that Pipeline is not a transaction, so you can get unexpected
@@ -24,29 +22,35 @@ type pipelineExecer func(context.Context, []Cmder) error
// depends of your batch size and/or use TxPipeline.
type Pipeliner interface {
StatefulCmdable
+
+ // Len is to obtain the number of commands in the pipeline that have not yet been executed.
Len() int
+
+ // Do is an API for executing any command.
+ // If a certain Redis command is not yet supported, you can use Do to execute it.
Do(ctx context.Context, args ...interface{}) *Cmd
+
+ // Process is to put the commands to be executed into the pipeline buffer.
Process(ctx context.Context, cmd Cmder) error
- Close() error
- Discard() error
+
+ // Discard is to discard all commands in the cache that have not yet been executed.
+ Discard()
+
+ // Exec is to send all the commands buffered in the pipeline to the redis-server.
Exec(ctx context.Context) ([]Cmder, error)
}
var _ Pipeliner = (*Pipeline)(nil)
// Pipeline implements pipelining as described in
-// http://redis.io/topics/pipelining. It's safe for concurrent use
-// by multiple goroutines.
+// http://redis.io/topics/pipelining.
+// Please note: it is not safe for concurrent use by multiple goroutines.
type Pipeline struct {
cmdable
statefulCmdable
- ctx context.Context
exec pipelineExecer
-
- mu sync.Mutex
- cmds []Cmder
- closed bool
+ cmds []Cmder
}
func (c *Pipeline) init() {
@@ -56,50 +60,29 @@ func (c *Pipeline) init() {
// Len returns the number of queued commands.
func (c *Pipeline) Len() int {
- c.mu.Lock()
- ln := len(c.cmds)
- c.mu.Unlock()
- return ln
+ return len(c.cmds)
}
// Do queues the custom command for later execution.
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
+ if len(args) == 0 {
+ cmd.SetErr(errors.New("redis: please enter the command to be executed"))
+ return cmd
+ }
_ = c.Process(ctx, cmd)
return cmd
}
// Process queues the cmd for later execution.
func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
- c.mu.Lock()
c.cmds = append(c.cmds, cmd)
- c.mu.Unlock()
- return nil
-}
-
-// Close closes the pipeline, releasing any open resources.
-func (c *Pipeline) Close() error {
- c.mu.Lock()
- _ = c.discard()
- c.closed = true
- c.mu.Unlock()
return nil
}
// Discard resets the pipeline and discards queued commands.
-func (c *Pipeline) Discard() error {
- c.mu.Lock()
- err := c.discard()
- c.mu.Unlock()
- return err
-}
-
-func (c *Pipeline) discard() error {
- if c.closed {
- return pool.ErrClosed
- }
+func (c *Pipeline) Discard() {
c.cmds = c.cmds[:0]
- return nil
}
// Exec executes all previously queued commands using one
@@ -108,13 +91,6 @@ func (c *Pipeline) discard() error {
// Exec always returns list of commands and error of the first failed
// command if any.
func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
if len(c.cmds) == 0 {
return nil, nil
}
@@ -129,9 +105,7 @@ func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]C
if err := fn(c); err != nil {
return nil, err
}
- cmds, err := c.Exec(ctx)
- _ = c.Close()
- return cmds, err
+ return c.Exec(ctx)
}
func (c *Pipeline) Pipeline() Pipeliner {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline_test.go
index f24114d..7f73447 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pipeline_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pipeline_test.go
@@ -1,12 +1,13 @@
package redis_test
import (
+ "errors"
"strconv"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("pipelining", func() {
@@ -70,7 +71,7 @@ var _ = Describe("pipelining", func() {
Expect(cmds).To(HaveLen(1))
})
- It("handles large pipelines", func() {
+ It("handles large pipelines", Label("NonRedisEnterprise"), func() {
for callCount := 1; callCount < 16; callCount++ {
for i := 1; i <= callCount; i++ {
pipe.SetNX(ctx, strconv.Itoa(i)+"_key", strconv.Itoa(i)+"_value", 0)
@@ -84,6 +85,11 @@ var _ = Describe("pipelining", func() {
}
}
})
+
+ It("should Exec, not Do", func() {
+ err := pipe.Do(ctx).Err()
+ Expect(err).To(Equal(errors.New("redis: please enter the command to be executed")))
+ })
}
Describe("Pipeline", func() {
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pool_test.go
index dbef72e..1864e88 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pool_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pool_test.go
@@ -4,10 +4,10 @@ import (
"context"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("pool", func() {
@@ -16,8 +16,8 @@ var _ = Describe("pool", func() {
BeforeEach(func() {
opt := redisOptions()
opt.MinIdleConns = 0
- opt.MaxConnAge = 0
- opt.IdleTimeout = time.Second
+ opt.ConnMaxLifetime = 0
+ opt.ConnMaxIdleTime = time.Second
client = redis.NewClient(opt)
})
@@ -72,7 +72,6 @@ var _ = Describe("pool", func() {
Expect(cmds).To(HaveLen(1))
Expect(ping.Err()).NotTo(HaveOccurred())
Expect(ping.Val()).To(Equal("PONG"))
- Expect(pipe.Close()).NotTo(HaveOccurred())
})
pool := client.Pool()
@@ -87,13 +86,14 @@ var _ = Describe("pool", func() {
cn.SetNetConn(&badConn{})
client.Pool().Put(ctx, cn)
- err = client.Ping(ctx).Err()
- Expect(err).To(MatchError("bad connection"))
-
val, err := client.Ping(ctx).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(Equal("PONG"))
+ val, err = client.Ping(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+
pool := client.Pool()
Expect(pool.Len()).To(Equal(1))
Expect(pool.IdleLen()).To(Equal(1))
@@ -105,11 +105,11 @@ var _ = Describe("pool", func() {
})
It("reuses connections", func() {
- // explain: https://github.com/go-redis/redis/pull/1675
+ // explain: https://github.com/redis/go-redis/pull/1675
opt := redisOptions()
opt.MinIdleConns = 0
- opt.MaxConnAge = 0
- opt.IdleTimeout = 2 * time.Second
+ opt.ConnMaxLifetime = 0
+ opt.ConnMaxIdleTime = 10 * time.Second
client = redis.NewClient(opt)
for i := 0; i < 100; i++ {
@@ -127,31 +127,4 @@ var _ = Describe("pool", func() {
Expect(stats.Misses).To(Equal(uint32(1)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
-
- It("removes idle connections", func() {
- err := client.Ping(ctx).Err()
- Expect(err).NotTo(HaveOccurred())
-
- stats := client.PoolStats()
- Expect(stats).To(Equal(&redis.PoolStats{
- Hits: 0,
- Misses: 1,
- Timeouts: 0,
- TotalConns: 1,
- IdleConns: 1,
- StaleConns: 0,
- }))
-
- time.Sleep(2 * time.Second)
-
- stats = client.PoolStats()
- Expect(stats).To(Equal(&redis.PoolStats{
- Hits: 0,
- Misses: 1,
- Timeouts: 0,
- TotalConns: 0,
- IdleConns: 0,
- StaleConns: 1,
- }))
- })
})
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go
new file mode 100644
index 0000000..5d5cd1a
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic.go
@@ -0,0 +1,1429 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type ProbabilisticCmdable interface {
+ BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFCard(ctx context.Context, key string) *IntCmd
+ BFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFInfo(ctx context.Context, key string) *BFInfoCmd
+ BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd
+ BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd
+ BFInfoSize(ctx context.Context, key string) *BFInfoCmd
+ BFInfoFilters(ctx context.Context, key string) *BFInfoCmd
+ BFInfoItems(ctx context.Context, key string) *BFInfoCmd
+ BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd
+ BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd
+ BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd
+ BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFCount(ctx context.Context, key string, element interface{}) *IntCmd
+ CFDel(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFInfo(ctx context.Context, key string) *CFInfoCmd
+ CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd
+ CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd
+ CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd
+ CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd
+ CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd
+ CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd
+ CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ CMSInfo(ctx context.Context, key string) *CMSInfoCmd
+ CMSInitByDim(ctx context.Context, key string, width, height int64) *StatusCmd
+ CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd
+ CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd
+ CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd
+ CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+
+ TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKInfo(ctx context.Context, key string) *TopKInfoCmd
+ TopKList(ctx context.Context, key string) *StringSliceCmd
+ TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd
+ TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ TopKReserve(ctx context.Context, key string, k int64) *StatusCmd
+ TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd
+
+ TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd
+ TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestCreate(ctx context.Context, key string) *StatusCmd
+ TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd
+ TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd
+ TDigestMax(ctx context.Context, key string) *FloatCmd
+ TDigestMin(ctx context.Context, key string) *FloatCmd
+ TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd
+ TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestReset(ctx context.Context, key string) *StatusCmd
+ TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd
+}
+
+type BFInsertOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+ NoCreate bool
+}
+
+type BFReserveOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+}
+
+type CFReserveOptions struct {
+ Capacity int64
+ BucketSize int64
+ MaxIterations int64
+ Expansion int64
+}
+
+type CFInsertOptions struct {
+ Capacity int64
+ NoCreate bool
+}
+
+// -------------------------------------------
+// Bloom filter commands
+//-------------------------------------------
+
+// BFReserve creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveExpansion creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying an expansion rate for the filter.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveNonScaling creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying that the filter should not scale.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "NONSCALING"}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveWithArgs creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying additional options such as expansion rate and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key}
+ if options != nil {
+ args = append(args, options.Error, options.Capacity)
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFAdd adds an item to a Bloom filter.
+// For more information - https://redis.io/commands/bf.add/
+func (c cmdable) BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFCard returns the cardinality of a Bloom filter -
+// number of items that were added to a Bloom filter and detected as unique
+// (items that caused at least one bit to be set in at least one sub-filter).
+// For more information - https://redis.io/commands/bf.card/
+func (c cmdable) BFCard(ctx context.Context, key string) *IntCmd {
+ args := []interface{}{"BF.CARD", key}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFExists determines whether a given item was added to a Bloom filter.
+// For more information - https://redis.io/commands/bf.exists/
+func (c cmdable) BFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFLoadChunk restores a Bloom filter previously saved using BF.SCANDUMP.
+// For more information - https://redis.io/commands/bf.loadchunk/
+func (c cmdable) BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"BF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Begins an incremental save of the Bloom filter.
+// This command is useful for large Bloom filters that cannot fit into the DUMP and RESTORE model.
+// For more information - https://redis.io/commands/bf.scandump/
+func (c cmdable) BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"BF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ScanDump struct {
+ Iter int64
+ Data string
+}
+
+type ScanDumpCmd struct {
+ baseCmd
+
+ val ScanDump
+}
+
+func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd {
+ return &ScanDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ScanDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ScanDumpCmd) SetVal(val ScanDump) {
+ cmd.val = val
+}
+
+func (cmd *ScanDumpCmd) Result() (ScanDump, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ScanDumpCmd) Val() ScanDump {
+ return cmd.val
+}
+
+func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = ScanDump{}
+ for i := 0; i < n; i++ {
+ iter, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ data, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Data = data
+ cmd.val.Iter = iter
+
+ }
+
+ return nil
+}
+
+// Returns information about a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BFInfo struct {
+ Capacity int64
+ Size int64
+ Filters int64
+ ItemsInserted int64
+ ExpansionRate int64
+}
+
+type BFInfoCmd struct {
+ baseCmd
+
+ val BFInfo
+}
+
+func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd {
+ return &BFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BFInfoCmd) SetVal(val BFInfo) {
+ cmd.val = val
+}
+
+func (cmd *BFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BFInfoCmd) Val() BFInfo {
+ return cmd.val
+}
+
+func (cmd *BFInfoCmd) Result() (BFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result BFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of filters":
+ result.Filters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.ItemsInserted, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// BFInfoCapacity returns information about the capacity of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "CAPACITY")
+}
+
+// BFInfoSize returns information about the size of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoSize(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "SIZE")
+}
+
+// BFInfoFilters returns information about the filters of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoFilters(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "FILTERS")
+}
+
+// BFInfoItems returns information about the items of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoItems(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "ITEMS")
+}
+
+// BFInfoExpansion returns information about the expansion rate of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "EXPANSION")
+}
+
+// BFInfoArg returns information about a specific option of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key, option}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFInsert inserts elements into a Bloom filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.insert/
+func (c cmdable) BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.INSERT", key}
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.Error != 0 {
+ args = append(args, "ERROR", options.Error)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMAdd adds multiple elements to a Bloom filter.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/bf.madd/
+func (c cmdable) BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MADD", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMExists check if multiple elements exist in a Bloom filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/bf.mexists/
+func (c cmdable) BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MEXISTS", key}
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// Cuckoo filter commands
+//-------------------------------------------
+
+// CFReserve creates an empty Cuckoo filter with the specified capacity.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveExpansion creates an empty Cuckoo filter with the specified capacity and expansion rate.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveBucketSize creates an empty Cuckoo filter with the specified capacity and bucket size.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "BUCKETSIZE", bucketsize}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveMaxIterations creates an empty Cuckoo filter with the specified capacity and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "MAXITERATIONS", maxiterations}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveWithArgs creates an empty Cuckoo filter with the specified options.
+// This function allows for specifying additional options such as bucket size and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, options.Capacity}
+ if options.BucketSize != 0 {
+ args = append(args, "BUCKETSIZE", options.BucketSize)
+ }
+ if options.MaxIterations != 0 {
+ args = append(args, "MAXITERATIONS", options.MaxIterations)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAdd adds an element to a Cuckoo filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.add/
+func (c cmdable) CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAddNX adds an element to a Cuckoo filter only if it does not already exist in the filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.addnx/
+func (c cmdable) CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADDNX", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFCount returns an estimate of the number of times an element may be in a Cuckoo Filter.
+// For more information - https://redis.io/commands/cf.count/
+func (c cmdable) CFCount(ctx context.Context, key string, element interface{}) *IntCmd {
+ args := []interface{}{"CF.COUNT", key, element}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFDel deletes an item once from the cuckoo filter.
+// For more information - https://redis.io/commands/cf.del/
+func (c cmdable) CFDel(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.DEL", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFExists determines whether an item may exist in the Cuckoo Filter or not.
+// For more information - https://redis.io/commands/cf.exists/
+func (c cmdable) CFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFLoadChunk restores a filter previously saved using SCANDUMP.
+// For more information - https://redis.io/commands/cf.loadchunk/
+func (c cmdable) CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"CF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFScanDump begins an incremental save of the cuckoo filter.
+// For more information - https://redis.io/commands/cf.scandump/
+func (c cmdable) CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"CF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CFInfo struct {
+ Size int64
+ NumBuckets int64
+ NumFilters int64
+ NumItemsInserted int64
+ NumItemsDeleted int64
+ BucketSize int64
+ ExpansionRate int64
+ MaxIteration int64
+}
+
+type CFInfoCmd struct {
+ baseCmd
+
+ val CFInfo
+}
+
+func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd {
+ return &CFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CFInfoCmd) SetVal(val CFInfo) {
+ cmd.val = val
+}
+
+func (cmd *CFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CFInfoCmd) Val() CFInfo {
+ return cmd.val
+}
+
+func (cmd *CFInfoCmd) Result() (CFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of buckets":
+ result.NumBuckets, err = rd.ReadInt()
+ case "Number of filters":
+ result.NumFilters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.NumItemsInserted, err = rd.ReadInt()
+ case "Number of items deleted":
+ result.NumItemsDeleted, err = rd.ReadInt()
+ case "Bucket size":
+ result.BucketSize, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ case "Max iterations":
+ result.MaxIteration, err = rd.ReadInt()
+
+ default:
+ return fmt.Errorf("redis: CF.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CFInfo returns information about a Cuckoo filter.
+// For more information - https://redis.io/commands/cf.info/
+func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd {
+ args := []interface{}{"CF.INFO", key}
+ cmd := NewCFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsert inserts elements into a Cuckoo filter.
+// This function also allows for specifying additional options such as capacity, error rate, expansion rate, and non-scaling behavior.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insert/
+func (c cmdable) CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.INSERT", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsertNX inserts elements into a Cuckoo filter only if they do not already exist in the filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// Returns an array of integers indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insertnx/
+func (c cmdable) CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CF.INSERTNX", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) getCfInsertWithArgs(args []interface{}, options *CFInsertOptions, elements ...interface{}) []interface{} {
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ return args
+}
+
+// CFMExists check if multiple elements exist in a Cuckoo filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/cf.mexists/
+func (c cmdable) CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.MEXISTS", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// CMS commands
+//-------------------------------------------
+
+// CMSIncrBy increments the count of one or more items in a Count-Min Sketch filter.
+// Returns an array of integers representing the updated count of each item.
+// For more information - https://redis.io/commands/cms.incrby/
+func (c cmdable) CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "CMS.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CMSInfo struct {
+ Width int64
+ Depth int64
+ Count int64
+}
+
+type CMSInfoCmd struct {
+ baseCmd
+
+ val CMSInfo
+}
+
+func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd {
+ return &CMSInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CMSInfoCmd) SetVal(val CMSInfo) {
+ cmd.val = val
+}
+
+func (cmd *CMSInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CMSInfoCmd) Val() CMSInfo {
+ return cmd.val
+}
+
+func (cmd *CMSInfoCmd) Result() (CMSInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CMSInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "count":
+ result.Count, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CMS.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CMSInfo returns information about a Count-Min Sketch filter.
+// For more information - https://redis.io/commands/cms.info/
+func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd {
+ args := []interface{}{"CMS.INFO", key}
+ cmd := NewCMSInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByDim creates an empty Count-Min Sketch filter with the specified dimensions.
+// For more information - https://redis.io/commands/cms.initbydim/
+func (c cmdable) CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYDIM", key, width, depth}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByProb creates an empty Count-Min Sketch filter with the specified error rate and probability.
+// For more information - https://redis.io/commands/cms.initbyprob/
+func (c cmdable) CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYPROB", key, errorRate, probability}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMerge merges multiple Count-Min Sketch filters into a single filter.
+// The destination filter must not exist and will be created with the dimensions of the first source filter.
+// The number of items in each source filter must be equal.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"CMS.MERGE", destKey, len(sourceKeys)}
+ for _, s := range sourceKeys {
+ args = append(args, s)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMergeWithWeight merges multiple Count-Min Sketch filters into a single filter with weights for each source filter.
+// The destination filter must not exist and will be created with the dimensions of the first source filter.
+// The number of items in each source filter must be equal.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd {
+ args := make([]interface{}, 0, 4+(len(sourceKeys)*2+1))
+ args = append(args, "CMS.MERGE", destKey, len(sourceKeys))
+
+ if len(sourceKeys) > 0 {
+ sk := make([]interface{}, len(sourceKeys))
+ sw := make([]interface{}, len(sourceKeys))
+
+ i := 0
+ for k, w := range sourceKeys {
+ sk[i] = k
+ sw[i] = w
+ i++
+ }
+
+ args = append(args, sk...)
+ args = append(args, "WEIGHTS")
+ args = append(args, sw...)
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSQuery returns count for item(s).
+// For more information - https://redis.io/commands/cms.query/
+func (c cmdable) CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CMS.QUERY", key}
+ args = append(args, elements...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// TopK commands
+//--------------------------------------------
+
+// TopKAdd adds one or more elements to a Top-K filter.
+// Returns an array of strings representing the items that were removed from the filter, if any.
+// For more information - https://redis.io/commands/topk.add/
+func (c cmdable) TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.ADD"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserve creates an empty Top-K filter with the specified number of top items to keep.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserve(ctx context.Context, key string, k int64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserveWithOptions creates an empty Top-K filter with the specified number of top items to keep and additional options.
+// This function allows for specifying additional options such as width, depth and decay.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k, width, depth, decay}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TopKInfo struct {
+ K int64
+ Width int64
+ Depth int64
+ Decay float64
+}
+
+type TopKInfoCmd struct {
+ baseCmd
+
+ val TopKInfo
+}
+
+func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd {
+ return &TopKInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TopKInfoCmd) SetVal(val TopKInfo) {
+ cmd.val = val
+}
+
+func (cmd *TopKInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TopKInfoCmd) Val() TopKInfo {
+ return cmd.val
+}
+
+func (cmd *TopKInfoCmd) Result() (TopKInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TopKInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "k":
+ result.K, err = rd.ReadInt()
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "decay":
+ result.Decay, err = rd.ReadFloat()
+ default:
+ return fmt.Errorf("redis: topk.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TopKInfo returns information about a Top-K filter.
+// For more information - https://redis.io/commands/topk.info/
+func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd {
+ args := []interface{}{"TOPK.INFO", key}
+
+ cmd := NewTopKInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKQuery check if multiple elements exist in a Top-K filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/topk.query/
+func (c cmdable) TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.QUERY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKCount returns an estimate of the number of times an item may be in a Top-K filter.
+// For more information - https://redis.io/commands/topk.count/
+func (c cmdable) TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.COUNT"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKIncrBy increases the count of one or more items in a Top-K filter.
+// For more information - https://redis.io/commands/topk.incrby/
+func (c cmdable) TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKList returns all items in Top-K list.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKList(ctx context.Context, key string) *StringSliceCmd {
+ args := []interface{}{"TOPK.LIST", key}
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKListWithCount returns all items in Top-K list with their respective count.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd {
+ args := []interface{}{"TOPK.LIST", key, "WITHCOUNT"}
+
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// t-digest commands
+// --------------------------------------------
+
+// TDigestAdd adds one or more elements to a t-Digest data structure.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.add/
+func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.ADD"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRank returns an array of values from a t-Digest data structure based on their rank.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrank/
+func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRevRank returns an array of values from a t-Digest data structure based on their reverse rank.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrevrank/
+func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYREVRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCDF returns an array of cumulative distribution function (CDF) values for one or more elements in a t-Digest data structure.
+// The CDF value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.cdf/
+func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.CDF"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreate creates an empty t-Digest data structure with default parameters.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreateWithCompression creates an empty t-Digest data structure with a specified compression parameter.
+// The compression parameter controls the accuracy and memory usage of the t-Digest.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key, "COMPRESSION", compression}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestInfo struct {
+ Compression int64
+ Capacity int64
+ MergedNodes int64
+ UnmergedNodes int64
+ MergedWeight int64
+ UnmergedWeight int64
+ Observations int64
+ TotalCompressions int64
+ MemoryUsage int64
+}
+
+type TDigestInfoCmd struct {
+ baseCmd
+
+ val TDigestInfo
+}
+
+func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd {
+ return &TDigestInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TDigestInfoCmd) SetVal(val TDigestInfo) {
+ cmd.val = val
+}
+
+func (cmd *TDigestInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TDigestInfoCmd) Val() TDigestInfo {
+ return cmd.val
+}
+
+func (cmd *TDigestInfoCmd) Result() (TDigestInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TDigestInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Compression":
+ result.Compression, err = rd.ReadInt()
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Merged nodes":
+ result.MergedNodes, err = rd.ReadInt()
+ case "Unmerged nodes":
+ result.UnmergedNodes, err = rd.ReadInt()
+ case "Merged weight":
+ result.MergedWeight, err = rd.ReadInt()
+ case "Unmerged weight":
+ result.UnmergedWeight, err = rd.ReadInt()
+ case "Observations":
+ result.Observations, err = rd.ReadInt()
+ case "Total compressions":
+ result.TotalCompressions, err = rd.ReadInt()
+ case "Memory usage":
+ result.MemoryUsage, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: tdigest.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TDigestInfo returns information about a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.info/
+func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd {
+ args := []interface{}{"TDIGEST.INFO", key}
+
+ cmd := NewTDigestInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMax returns the maximum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.max/
+func (c cmdable) TDigestMax(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MAX", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestMergeOptions struct {
+ Compression int64
+ Override bool
+}
+
+// TDigestMerge merges multiple t-Digest data structures into a single t-Digest.
+// This function also allows for specifying additional options such as compression and override behavior.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.merge/
+func (c cmdable) TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"TDIGEST.MERGE", destKey, len(sourceKeys)}
+
+ for _, sourceKey := range sourceKeys {
+ args = append(args, sourceKey)
+ }
+
+ if options != nil {
+ if options.Compression != 0 {
+ args = append(args, "COMPRESSION", options.Compression)
+ }
+ if options.Override {
+ args = append(args, "OVERRIDE")
+ }
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMin returns the minimum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.min/
+func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MIN", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestQuantile returns an array of quantile values for one or more elements in a t-Digest data structure.
+// The quantile value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the quantile values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.quantile/
+func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.QUANTILE"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRank returns an array of rank values for one or more elements in a t-Digest data structure.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of integers representing the rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.rank/
+func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.RANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestReset resets a t-Digest data structure to its initial state.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.reset/
+func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.RESET", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRevRank returns an array of reverse rank values for one or more elements in a t-Digest data structure.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.revrank/
+func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.REVRANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestTrimmedMean returns the trimmed mean value from a t-Digest data structure.
+// The trimmed mean is calculated by removing a specified fraction of the highest and lowest values from the t-Digest and then calculating the mean of the remaining values.
+// Returns a float representing the trimmed mean value or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.trimmed_mean/
+func (c cmdable) TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd {
+ args := []interface{}{"TDIGEST.TRIMMED_MEAN", key, lowCutQuantile, highCutQuantile}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go
new file mode 100644
index 0000000..0610c51
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/probabilistic_test.go
@@ -0,0 +1,733 @@
+package redis_test
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var _ = Describe("Probabilistic commands", Label("probabilistic"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: ":6379"})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ Describe("bloom", Label("bloom"), func() {
+ It("should BFAdd", Label("bloom", "bfadd"), func() {
+ resultAdd, err := client.BFAdd(ctx, "testbf1", 1).Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeTrue())
+
+ resultInfo, err := client.BFInfo(ctx, "testbf1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(resultInfo.ItemsInserted).To(BeEquivalentTo(int64(1)))
+ })
+
+ It("should BFCard", Label("bloom", "bfcard"), func() {
+ // This is a probabilistic data structure, and it's not always guaranteed that we will get back
+ // the exact number of inserted items, during hash collisions
+ // But with such a low number of items (only 3),
+ // the probability of a collision is very low, so we can expect to get back the exact number of items
+ _, err := client.BFAdd(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.BFAdd(ctx, "testbf1", "item2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.BFAdd(ctx, "testbf1", 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFCard(ctx, "testbf1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFExists", Label("bloom", "bfexists"), func() {
+ exists, err := client.BFExists(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeFalse())
+
+ _, err = client.BFAdd(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ exists, err = client.BFExists(ctx, "testbf1", "item1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+ })
+
+ It("should BFInfo and BFReserve", Label("bloom", "bfinfo", "bfreserve"), func() {
+ err := client.BFReserve(ctx, "testbf1", 0.001, 2000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfo(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ })
+
+ It("should BFInfoCapacity, BFInfoSize, BFInfoFilters, BFInfoItems, BFInfoExpansion, ", Label("bloom", "bfinfocapacity", "bfinfosize", "bfinfofilters", "bfinfoitems", "bfinfoexpansion"), func() {
+ err := client.BFReserve(ctx, "testbf1", 0.001, 2000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfoCapacity(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+
+ result, err = client.BFInfoItems(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.ItemsInserted).To(BeEquivalentTo(int64(0)))
+
+ result, err = client.BFInfoSize(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Size).To(BeEquivalentTo(int64(4056)))
+
+ err = client.BFReserveExpansion(ctx, "testbf2", 0.001, 2000, 3).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.BFInfoFilters(ctx, "testbf2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Filters).To(BeEquivalentTo(int64(1)))
+
+ result, err = client.BFInfoExpansion(ctx, "testbf2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFInsert", Label("bloom", "bfinsert"), func() {
+ options := &redis.BFInsertOptions{
+ Capacity: 2000,
+ Error: 0.001,
+ Expansion: 3,
+ NonScaling: false,
+ NoCreate: true,
+ }
+
+ resultInsert, err := client.BFInsert(ctx, "testbf1", options, "item1").Result()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("ERR not found"))
+
+ options = &redis.BFInsertOptions{
+ Capacity: 2000,
+ Error: 0.001,
+ Expansion: 3,
+ NonScaling: false,
+ NoCreate: false,
+ }
+
+ resultInsert, err = client.BFInsert(ctx, "testbf1", options, "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultInsert)).To(BeEquivalentTo(1))
+
+ exists, err := client.BFExists(ctx, "testbf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ result, err := client.BFInfo(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFMAdd", Label("bloom", "bfmadd"), func() {
+ resultAdd, err := client.BFMAdd(ctx, "testbf1", "item1", "item2", "item3").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultAdd)).To(Equal(3))
+
+ resultInfo, err := client.BFInfo(ctx, "testbf1").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(resultInfo.ItemsInserted).To(BeEquivalentTo(int64(3)))
+ resultAdd2, err := client.BFMAdd(ctx, "testbf1", "item1", "item2", "item4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd2[0]).To(BeFalse())
+ Expect(resultAdd2[1]).To(BeFalse())
+ Expect(resultAdd2[2]).To(BeTrue())
+ })
+
+ It("should BFMExists", Label("bloom", "bfmexists"), func() {
+ exist, err := client.BFMExists(ctx, "testbf1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(exist)).To(Equal(3))
+ Expect(exist[0]).To(BeFalse())
+ Expect(exist[1]).To(BeFalse())
+ Expect(exist[2]).To(BeFalse())
+
+ _, err = client.BFMAdd(ctx, "testbf1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ exist, err = client.BFMExists(ctx, "testbf1", "item1", "item2", "item3", "item4").Result()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(exist)).To(Equal(4))
+ Expect(exist[0]).To(BeTrue())
+ Expect(exist[1]).To(BeTrue())
+ Expect(exist[2]).To(BeTrue())
+ Expect(exist[3]).To(BeFalse())
+ })
+
+ It("should BFReserveExpansion", Label("bloom", "bfreserveexpansion"), func() {
+ err := client.BFReserveExpansion(ctx, "testbf1", 0.001, 2000, 3).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfo(ctx, "testbf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should BFReserveNonScaling", Label("bloom", "bfreservenonscaling"), func() {
+ err := client.BFReserveNonScaling(ctx, "testbfns1", 0.001, 1000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = client.BFInfo(ctx, "testbfns1").Result()
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("should BFScanDump and BFLoadChunk", Label("bloom", "bfscandump", "bfloadchunk"), func() {
+ err := client.BFReserve(ctx, "testbfsd1", 0.001, 3000).Err()
+ Expect(err).NotTo(HaveOccurred())
+ for i := 0; i < 1000; i++ {
+ client.BFAdd(ctx, "testbfsd1", i)
+ }
+ infBefore := client.BFInfoSize(ctx, "testbfsd1")
+ fd := []redis.ScanDump{}
+ sd, err := client.BFScanDump(ctx, "testbfsd1", 0).Result()
+ for {
+ if sd.Iter == 0 {
+ break
+ }
+ Expect(err).NotTo(HaveOccurred())
+ fd = append(fd, sd)
+ sd, err = client.BFScanDump(ctx, "testbfsd1", sd.Iter).Result()
+ }
+ client.Del(ctx, "testbfsd1")
+ for _, e := range fd {
+ client.BFLoadChunk(ctx, "testbfsd1", e.Iter, e.Data)
+ }
+ infAfter := client.BFInfoSize(ctx, "testbfsd1")
+ Expect(infBefore).To(BeEquivalentTo(infAfter))
+ })
+
+ It("should BFReserveWithArgs", Label("bloom", "bfreserveargs"), func() {
+ options := &redis.BFReserveOptions{
+ Capacity: 2000,
+ Error: 0.001,
+ Expansion: 3,
+ NonScaling: false,
+ }
+ err := client.BFReserveWithArgs(ctx, "testbf", options).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.BFInfo(ctx, "testbf").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.BFInfo{}))
+ Expect(result.Capacity).To(BeEquivalentTo(int64(2000)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(3)))
+ })
+ })
+
+ Describe("cuckoo", Label("cuckoo"), func() {
+ It("should CFAdd", Label("cuckoo", "cfadd"), func() {
+ add, err := client.CFAdd(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(BeTrue())
+
+ exists, err := client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ info, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ Expect(info.NumItemsInserted).To(BeEquivalentTo(int64(1)))
+ })
+
+ It("should CFAddNX", Label("cuckoo", "cfaddnx"), func() {
+ add, err := client.CFAddNX(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(add).To(BeTrue())
+
+ exists, err := client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ result, err := client.CFAddNX(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeFalse())
+
+ info, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ Expect(info.NumItemsInserted).To(BeEquivalentTo(int64(1)))
+ })
+
+ It("should CFCount", Label("cuckoo", "cfcount"), func() {
+ err := client.CFAdd(ctx, "testcf1", "item1").Err()
+ cnt, err := client.CFCount(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cnt).To(BeEquivalentTo(int64(1)))
+
+ err = client.CFAdd(ctx, "testcf1", "item1").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ cnt, err = client.CFCount(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cnt).To(BeEquivalentTo(int64(2)))
+ })
+
+ It("should CFDel and CFExists", Label("cuckoo", "cfdel", "cfexists"), func() {
+ err := client.CFAdd(ctx, "testcf1", "item1").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ exists, err := client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeTrue())
+
+ del, err := client.CFDel(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(del).To(BeTrue())
+
+ exists, err = client.CFExists(ctx, "testcf1", "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(exists).To(BeFalse())
+ })
+
+ It("should CFInfo and CFReserve", Label("cuckoo", "cfinfo", "cfreserve"), func() {
+ err := client.CFReserve(ctx, "testcf1", 1000).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CFReserveExpansion(ctx, "testcfe1", 1000, 1).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CFReserveBucketSize(ctx, "testcfbs1", 1000, 4).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CFReserveMaxIterations(ctx, "testcfmi1", 1000, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ })
+
+ It("should CFScanDump and CFLoadChunk", Label("bloom", "cfscandump", "cfloadchunk"), func() {
+ err := client.CFReserve(ctx, "testcfsd1", 1000).Err()
+ Expect(err).NotTo(HaveOccurred())
+ for i := 0; i < 1000; i++ {
+ Item := fmt.Sprintf("item%d", i)
+ client.CFAdd(ctx, "testcfsd1", Item)
+ }
+ infBefore := client.CFInfo(ctx, "testcfsd1")
+ fd := []redis.ScanDump{}
+ sd, err := client.CFScanDump(ctx, "testcfsd1", 0).Result()
+ for {
+ if sd.Iter == 0 {
+ break
+ }
+ Expect(err).NotTo(HaveOccurred())
+ fd = append(fd, sd)
+ sd, err = client.CFScanDump(ctx, "testcfsd1", sd.Iter).Result()
+ }
+ client.Del(ctx, "testcfsd1")
+ for _, e := range fd {
+ client.CFLoadChunk(ctx, "testcfsd1", e.Iter, e.Data)
+ }
+ infAfter := client.CFInfo(ctx, "testcfsd1")
+ Expect(infBefore).To(BeEquivalentTo(infAfter))
+ })
+
+ It("should CFInfo and CFReserveWithArgs", Label("cuckoo", "cfinfo", "cfreserveargs"), func() {
+ args := &redis.CFReserveOptions{
+ Capacity: 2048,
+ BucketSize: 3,
+ MaxIterations: 15,
+ Expansion: 2,
+ }
+
+ err := client.CFReserveWithArgs(ctx, "testcf1", args).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CFInfo(ctx, "testcf1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeAssignableToTypeOf(redis.CFInfo{}))
+ Expect(result.BucketSize).To(BeEquivalentTo(int64(3)))
+ Expect(result.MaxIteration).To(BeEquivalentTo(int64(15)))
+ Expect(result.ExpansionRate).To(BeEquivalentTo(int64(2)))
+ })
+
+ It("should CFInsert", Label("cuckoo", "cfinsert"), func() {
+ args := &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: true,
+ }
+
+ result, err := client.CFInsert(ctx, "testcf1", args, "item1", "item2", "item3").Result()
+ Expect(err).To(HaveOccurred())
+
+ args = &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: false,
+ }
+
+ result, err = client.CFInsert(ctx, "testcf1", args, "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ })
+
+ It("should CFInsertNX", Label("cuckoo", "cfinsertnx"), func() {
+ args := &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: true,
+ }
+
+ result, err := client.CFInsertNX(ctx, "testcf1", args, "item1", "item2", "item2").Result()
+ Expect(err).To(HaveOccurred())
+
+ args = &redis.CFInsertOptions{
+ Capacity: 3000,
+ NoCreate: false,
+ }
+
+ result, err = client.CFInsertNX(ctx, "testcf2", args, "item1", "item2", "item2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(1)))
+ Expect(result[2]).To(BeEquivalentTo(int64(0)))
+ })
+
+ It("should CFMexists", Label("cuckoo", "cfmexists"), func() {
+ err := client.CFInsert(ctx, "testcf1", nil, "item1", "item2", "item3").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CFMExists(ctx, "testcf1", "item1", "item2", "item3", "item4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(4))
+ Expect(result[0]).To(BeTrue())
+ Expect(result[1]).To(BeTrue())
+ Expect(result[2]).To(BeTrue())
+ Expect(result[3]).To(BeFalse())
+ })
+ })
+
+ Describe("CMS", Label("cms"), func() {
+ It("should CMSIncrBy", Label("cms", "cmsincrby"), func() {
+ err := client.CMSInitByDim(ctx, "testcms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CMSIncrBy(ctx, "testcms1", "item1", 1, "item2", 2, "item3", 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(2)))
+ Expect(result[2]).To(BeEquivalentTo(int64(3)))
+ })
+
+ It("should CMSInitByDim and CMSInfo", Label("cms", "cmsinitbydim", "cmsinfo"), func() {
+ err := client.CMSInitByDim(ctx, "testcms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.CMSInfo(ctx, "testcms1").Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(info).To(BeAssignableToTypeOf(redis.CMSInfo{}))
+ Expect(info.Width).To(BeEquivalentTo(int64(5)))
+ Expect(info.Depth).To(BeEquivalentTo(int64(10)))
+ })
+
+ It("should CMSInitByProb", Label("cms", "cmsinitbyprob"), func() {
+ err := client.CMSInitByProb(ctx, "testcms1", 0.002, 0.01).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.CMSInfo(ctx, "testcms1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info).To(BeAssignableToTypeOf(redis.CMSInfo{}))
+ })
+
+ It("should CMSMerge, CMSMergeWithWeight and CMSQuery", Label("cms", "cmsmerge", "cmsquery", "NonRedisEnterprise"), func() {
+ err := client.CMSMerge(ctx, "destCms1", "testcms2", "testcms3").Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("CMS: key does not exist"))
+
+ err = client.CMSInitByDim(ctx, "destCms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "destCms2", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "cms1", 2, 20).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "cms2", 3, 20).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.CMSMerge(ctx, "destCms1", "cms1", "cms2").Err()
+ Expect(err).To(MatchError("CMS: width/depth is not equal"))
+
+ client.Del(ctx, "cms1", "cms2")
+
+ err = client.CMSInitByDim(ctx, "cms1", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.CMSInitByDim(ctx, "cms2", 5, 10).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ client.CMSIncrBy(ctx, "cms1", "item1", 1, "item2", 2)
+ client.CMSIncrBy(ctx, "cms2", "item2", 2, "item3", 3)
+
+ err = client.CMSMerge(ctx, "destCms1", "cms1", "cms2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.CMSQuery(ctx, "destCms1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(4)))
+ Expect(result[2]).To(BeEquivalentTo(int64(3)))
+
+ sourceSketches := map[string]int64{
+ "cms1": 1,
+ "cms2": 2,
+ }
+ err = client.CMSMergeWithWeight(ctx, "destCms2", sourceSketches).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.CMSQuery(ctx, "destCms2", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(3))
+ Expect(result[0]).To(BeEquivalentTo(int64(1)))
+ Expect(result[1]).To(BeEquivalentTo(int64(6)))
+ Expect(result[2]).To(BeEquivalentTo(int64(6)))
+ })
+ })
+
+ Describe("TopK", Label("topk"), func() {
+ It("should TopKReserve, TopKInfo, TopKAdd, TopKQuery, TopKCount, TopKIncrBy, TopKList, TopKListWithCount", Label("topk", "topkreserve", "topkinfo", "topkadd", "topkquery", "topkcount", "topkincrby", "topklist", "topklistwithcount"), func() {
+ err := client.TopKReserve(ctx, "topk1", 3).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err := client.TopKInfo(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo.K).To(BeEquivalentTo(int64(3)))
+
+ resultAdd, err := client.TopKAdd(ctx, "topk1", "item1", "item2", 3, "item1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultAdd)).To(BeEquivalentTo(int64(4)))
+
+ resultQuery, err := client.TopKQuery(ctx, "topk1", "item1", "item2", 4, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultQuery)).To(BeEquivalentTo(4))
+ Expect(resultQuery[0]).To(BeTrue())
+ Expect(resultQuery[1]).To(BeTrue())
+ Expect(resultQuery[2]).To(BeFalse())
+ Expect(resultQuery[3]).To(BeTrue())
+
+ resultCount, err := client.TopKCount(ctx, "topk1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultCount)).To(BeEquivalentTo(3))
+ Expect(resultCount[0]).To(BeEquivalentTo(int64(2)))
+ Expect(resultCount[1]).To(BeEquivalentTo(int64(1)))
+ Expect(resultCount[2]).To(BeEquivalentTo(int64(0)))
+
+ resultIncr, err := client.TopKIncrBy(ctx, "topk1", "item1", 5, "item2", 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultIncr)).To(BeEquivalentTo(2))
+
+ resultCount, err = client.TopKCount(ctx, "topk1", "item1", "item2", "item3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultCount)).To(BeEquivalentTo(3))
+ Expect(resultCount[0]).To(BeEquivalentTo(int64(7)))
+ Expect(resultCount[1]).To(BeEquivalentTo(int64(11)))
+ Expect(resultCount[2]).To(BeEquivalentTo(int64(0)))
+
+ resultList, err := client.TopKList(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultList)).To(BeEquivalentTo(3))
+ Expect(resultList).To(ContainElements("item2", "item1", "3"))
+
+ resultListWithCount, err := client.TopKListWithCount(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(resultListWithCount)).To(BeEquivalentTo(3))
+ Expect(resultListWithCount["3"]).To(BeEquivalentTo(int64(1)))
+ Expect(resultListWithCount["item1"]).To(BeEquivalentTo(int64(7)))
+ Expect(resultListWithCount["item2"]).To(BeEquivalentTo(int64(11)))
+ })
+
+ It("should TopKReserveWithOptions", Label("topk", "topkreservewithoptions"), func() {
+ err := client.TopKReserveWithOptions(ctx, "topk1", 3, 1500, 8, 0.5).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err := client.TopKInfo(ctx, "topk1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo.K).To(BeEquivalentTo(int64(3)))
+ Expect(resultInfo.Width).To(BeEquivalentTo(int64(1500)))
+ Expect(resultInfo.Depth).To(BeEquivalentTo(int64(8)))
+ Expect(resultInfo.Decay).To(BeEquivalentTo(0.5))
+ })
+ })
+
+ Describe("t-digest", Label("tdigest"), func() {
+ It("should TDigestAdd, TDigestCreate, TDigestInfo, TDigestByRank, TDigestByRevRank, TDigestCDF, TDigestMax, TDigestMin, TDigestQuantile, TDigestRank, TDigestRevRank, TDigestTrimmedMean, TDigestReset, ", Label("tdigest", "tdigestadd", "tdigestcreate", "tdigestinfo", "tdigestbyrank", "tdigestbyrevrank", "tdigestcdf", "tdigestmax", "tdigestmin", "tdigestquantile", "tdigestrank", "tdigestrevrank", "tdigesttrimmedmean", "tdigestreset"), func() {
+ err := client.TDigestCreate(ctx, "tdigest1").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Observations).To(BeEquivalentTo(int64(0)))
+
+ // Test with empty sketch
+ byRank, err := client.TDigestByRank(ctx, "tdigest1", 0, 1, 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRank)).To(BeEquivalentTo(4))
+
+ byRevRank, err := client.TDigestByRevRank(ctx, "tdigest1", 0, 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRevRank)).To(BeEquivalentTo(3))
+
+ cdf, err := client.TDigestCDF(ctx, "tdigest1", 15, 35, 70).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cdf)).To(BeEquivalentTo(3))
+
+ max, err := client.TDigestMax(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(math.IsNaN(max)).To(BeTrue())
+
+ min, err := client.TDigestMin(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(math.IsNaN(min)).To(BeTrue())
+
+ quantile, err := client.TDigestQuantile(ctx, "tdigest1", 0.1, 0.2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(quantile)).To(BeEquivalentTo(2))
+
+ rank, err := client.TDigestRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(rank)).To(BeEquivalentTo(2))
+
+ revRank, err := client.TDigestRevRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(revRank)).To(BeEquivalentTo(2))
+
+ trimmedMean, err := client.TDigestTrimmedMean(ctx, "tdigest1", 0.1, 0.6).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(math.IsNaN(trimmedMean)).To(BeTrue())
+
+ // Add elements
+ err = client.TDigestAdd(ctx, "tdigest1", 10, 20, 30, 40, 50, 60, 70, 80, 90, 100).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err = client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Observations).To(BeEquivalentTo(int64(10)))
+
+ byRank, err = client.TDigestByRank(ctx, "tdigest1", 0, 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRank)).To(BeEquivalentTo(3))
+ Expect(byRank[0]).To(BeEquivalentTo(float64(10)))
+ Expect(byRank[1]).To(BeEquivalentTo(float64(20)))
+ Expect(byRank[2]).To(BeEquivalentTo(float64(30)))
+
+ byRevRank, err = client.TDigestByRevRank(ctx, "tdigest1", 0, 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(byRevRank)).To(BeEquivalentTo(3))
+ Expect(byRevRank[0]).To(BeEquivalentTo(float64(100)))
+ Expect(byRevRank[1]).To(BeEquivalentTo(float64(90)))
+ Expect(byRevRank[2]).To(BeEquivalentTo(float64(80)))
+
+ cdf, err = client.TDigestCDF(ctx, "tdigest1", 15, 35, 70).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cdf)).To(BeEquivalentTo(3))
+ Expect(cdf[0]).To(BeEquivalentTo(0.1))
+ Expect(cdf[1]).To(BeEquivalentTo(0.3))
+ Expect(cdf[2]).To(BeEquivalentTo(0.65))
+
+ max, err = client.TDigestMax(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(max).To(BeEquivalentTo(float64(100)))
+
+ min, err = client.TDigestMin(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(min).To(BeEquivalentTo(float64(10)))
+
+ quantile, err = client.TDigestQuantile(ctx, "tdigest1", 0.1, 0.2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(quantile)).To(BeEquivalentTo(2))
+ Expect(quantile[0]).To(BeEquivalentTo(float64(20)))
+ Expect(quantile[1]).To(BeEquivalentTo(float64(30)))
+
+ rank, err = client.TDigestRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(rank)).To(BeEquivalentTo(2))
+ Expect(rank[0]).To(BeEquivalentTo(int64(0)))
+ Expect(rank[1]).To(BeEquivalentTo(int64(1)))
+
+ revRank, err = client.TDigestRevRank(ctx, "tdigest1", 10, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(revRank)).To(BeEquivalentTo(2))
+ Expect(revRank[0]).To(BeEquivalentTo(int64(9)))
+ Expect(revRank[1]).To(BeEquivalentTo(int64(8)))
+
+ trimmedMean, err = client.TDigestTrimmedMean(ctx, "tdigest1", 0.1, 0.6).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(trimmedMean).To(BeEquivalentTo(float64(40)))
+
+ reset, err := client.TDigestReset(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(reset).To(BeEquivalentTo("OK"))
+ })
+
+ It("should TDigestCreateWithCompression", Label("tdigest", "tcreatewithcompression"), func() {
+ err := client.TDigestCreateWithCompression(ctx, "tdigest1", 2000).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Compression).To(BeEquivalentTo(int64(2000)))
+ })
+
+ It("should TDigestMerge", Label("tdigest", "tmerge", "NonRedisEnterprise"), func() {
+ err := client.TDigestCreate(ctx, "tdigest1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.TDigestAdd(ctx, "tdigest1", 10, 20, 30, 40, 50, 60, 70, 80, 90, 100).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.TDigestCreate(ctx, "tdigest2").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.TDigestAdd(ctx, "tdigest2", 15, 25, 35, 45, 55, 65, 75, 85, 95, 105).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.TDigestCreate(ctx, "tdigest3").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.TDigestAdd(ctx, "tdigest3", 50, 60, 70, 80, 90, 100, 110, 120, 130, 140).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ options := &redis.TDigestMergeOptions{
+ Compression: 1000,
+ Override: false,
+ }
+ err = client.TDigestMerge(ctx, "tdigest1", options, "tdigest2", "tdigest3").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ info, err := client.TDigestInfo(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(info.Observations).To(BeEquivalentTo(int64(30)))
+ Expect(info.Compression).To(BeEquivalentTo(int64(1000)))
+
+ max, err := client.TDigestMax(ctx, "tdigest1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(max).To(BeEquivalentTo(float64(140)))
+ })
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub.go
index efc2354..5df537c 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub.go
@@ -7,9 +7,9 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// PubSub implements Pub/Sub commands as described in
@@ -24,10 +24,11 @@ type PubSub struct {
newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
closeConn func(*pool.Conn) error
- mu sync.Mutex
- cn *pool.Conn
- channels map[string]struct{}
- patterns map[string]struct{}
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+ schannels map[string]struct{}
closed bool
exit chan struct{}
@@ -46,6 +47,7 @@ func (c *PubSub) init() {
func (c *PubSub) String() string {
channels := mapKeys(c.channels)
channels = append(channels, mapKeys(c.patterns)...)
+ channels = append(channels, mapKeys(c.schannels)...)
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
@@ -82,7 +84,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er
}
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
- return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
}
@@ -101,6 +103,13 @@ func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
}
}
+ if len(c.schannels) > 0 {
+ err := c._subscribe(ctx, cn, "ssubscribe", mapKeys(c.schannels))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
return firstErr
}
@@ -208,15 +217,38 @@ func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
return err
}
+// SSubscribe Subscribes the client to the specified shard channels.
+func (c *PubSub) SSubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "ssubscribe", channels...)
+ if c.schannels == nil {
+ c.schannels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.schannels[s] = struct{}{}
+ }
+ return err
+}
+
// Unsubscribe the client from the given channels, or from all of
// them if none is given.
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- for _, channel := range channels {
- delete(c.channels, channel)
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.channels {
+ delete(c.channels, channel)
+ }
}
+
err := c.subscribe(ctx, "unsubscribe", channels...)
return err
}
@@ -227,13 +259,42 @@ func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- for _, pattern := range patterns {
- delete(c.patterns, pattern)
+ if len(patterns) > 0 {
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ } else {
+ // Unsubscribe from all patterns.
+ for pattern := range c.patterns {
+ delete(c.patterns, pattern)
+ }
}
+
err := c.subscribe(ctx, "punsubscribe", patterns...)
return err
}
+// SUnsubscribe unsubscribes the client from the given shard channels,
+// or from all of them if none is given.
+func (c *PubSub) SUnsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.schannels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.schannels {
+ delete(c.schannels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "sunsubscribe", channels...)
+ return err
+}
+
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
cn, err := c.conn(ctx, channels)
if err != nil {
@@ -311,7 +372,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
}, nil
case []interface{}:
switch kind := reply[0].(string); kind {
- case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe", "sunsubscribe":
// Can be nil in case of "unsubscribe".
channel, _ := reply[1].(string)
return &Subscription{
@@ -319,7 +380,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
Channel: channel,
Count: int(reply[2].(int64)),
}, nil
- case "message":
+ case "message", "smessage":
switch payload := reply[2].(type) {
case string:
return &Message{
@@ -371,7 +432,7 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int
return nil, err
}
- err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error {
return c.cmd.readReply(rd)
})
@@ -426,11 +487,11 @@ func (c *PubSub) getContext() context.Context {
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
+// is blocked full for 1 minute the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
+// and re-subscribes if ping can not not received for 1 minute.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
c.chOnce.Do(func() {
c.msgCh = newChannel(c, opts...)
@@ -456,9 +517,9 @@ func (c *PubSub) ChannelSize(size int) <-chan *Message {
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interface{} {
c.chOnce.Do(func() {
- c.allCh = newChannel(c, WithChannelSize(size))
+ c.allCh = newChannel(c, opts...)
c.allCh.initAllChan()
})
if c.allCh == nil {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go
new file mode 100644
index 0000000..28622aa
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_commands.go
@@ -0,0 +1,76 @@
+package redis
+
+import "context"
+
+type PubSubCmdable interface {
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ SPublish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+ PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+}
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SPublish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "spublish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "shardchannels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "shardnumsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_test.go
index 2dfa66b..a761006 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/pubsub_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/pubsub_test.go
@@ -1,30 +1,24 @@
package redis_test
import (
- "context"
"io"
"net"
"sync"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("PubSub", func() {
var client *redis.Client
- var clientID int64
BeforeEach(func() {
opt := redisOptions()
opt.MinIdleConns = 0
- opt.MaxConnAge = 0
- opt.OnConnect = func(ctx context.Context, cn *redis.Conn) (err error) {
- clientID, err = cn.ClientID(ctx).Result()
- return err
- }
+ opt.ConnMaxLifetime = 0
client = redis.NewClient(opt)
Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
})
@@ -108,6 +102,35 @@ var _ = Describe("PubSub", func() {
Expect(len(channels)).To(BeNumerically(">=", 2))
})
+ It("should sharded pub/sub channels", func() {
+ channels, err := client.PubSubShardChannels(ctx, "mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ pubsub := client.SSubscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ channels, err = client.PubSubShardChannels(ctx, "mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(ConsistOf([]string{"mychannel", "mychannel2"}))
+
+ channels, err = client.PubSubShardChannels(ctx, "").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ channels, err = client.PubSubShardChannels(ctx, "*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(channels)).To(BeNumerically(">=", 2))
+
+ nums, err := client.PubSubShardNumSub(ctx, "mychannel", "mychannel2", "mychannel3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nums).To(Equal(map[string]int64{
+ "mychannel": 1,
+ "mychannel2": 1,
+ "mychannel3": 0,
+ }))
+ })
+
It("should return the numbers of subscribers", func() {
pubsub := client.Subscribe(ctx, "mychannel", "mychannel2")
defer pubsub.Close()
@@ -210,6 +233,82 @@ var _ = Describe("PubSub", func() {
Expect(stats.Misses).To(Equal(uint32(1)))
})
+ It("should sharded pub/sub", func() {
+ pubsub := client.SSubscribe(ctx, "mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("ssubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("ssubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(2))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err.(net.Error).Timeout()).To(Equal(true))
+ Expect(msgi).NotTo(HaveOccurred())
+ }
+
+ n, err := client.SPublish(ctx, "mychannel", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.SPublish(ctx, "mychannel2", "hello2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ Expect(pubsub.SUnsubscribe(ctx, "mychannel", "mychannel2")).NotTo(HaveOccurred())
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel2"))
+ Expect(msg.Payload).To(Equal("hello2"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("sunsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("sunsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(0))
+ }
+
+ stats := client.PoolStats()
+ Expect(stats.Misses).To(Equal(uint32(1)))
+ })
+
It("should ping/pong", func() {
pubsub := client.Subscribe(ctx, "mychannel")
defer pubsub.Close()
@@ -421,30 +520,6 @@ var _ = Describe("PubSub", func() {
Expect(msg.Payload).To(Equal(string(bigVal)))
})
- It("handles message payload slice with server-assisted client-size caching", func() {
- pubsub := client.Subscribe(ctx, "__redis__:invalidate")
- defer pubsub.Close()
-
- client2 := redis.NewClient(redisOptions())
- defer client2.Close()
-
- err := client2.Do(ctx, "CLIENT", "TRACKING", "on", "REDIRECT", clientID).Err()
- Expect(err).NotTo(HaveOccurred())
-
- err = client2.Do(ctx, "GET", "mykey").Err()
- Expect(err).To(Equal(redis.Nil))
-
- err = client2.Do(ctx, "SET", "mykey", "myvalue").Err()
- Expect(err).NotTo(HaveOccurred())
-
- ch := pubsub.Channel()
-
- var msg *redis.Message
- Eventually(ch).Should(Receive(&msg))
- Expect(msg.Channel).To(Equal("__redis__:invalidate"))
- Expect(msg.PayloadSlice).To(Equal([]string{"mykey"}))
- })
-
It("supports concurrent Ping and Receive", func() {
const N = 100
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/race_test.go
index 34699d1..aeb2d1f 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/race_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/race_test.go
@@ -2,7 +2,6 @@ package redis_test
import (
"bytes"
- "context"
"fmt"
"net"
"strconv"
@@ -10,10 +9,10 @@ import (
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("races", func() {
@@ -138,7 +137,7 @@ var _ = Describe("races", func() {
})
})
- It("should select db", func() {
+ It("should select db", Label("NonRedisEnterprise"), func() {
err := client.Set(ctx, "db", 1, 0).Err()
Expect(err).NotTo(HaveOccurred())
@@ -215,53 +214,6 @@ var _ = Describe("races", func() {
Expect(val).To(Equal(int64(C * N)))
})
- It("should Pipeline", func() {
- perform(C, func(id int) {
- pipe := client.Pipeline()
- for i := 0; i < N; i++ {
- pipe.Echo(ctx, fmt.Sprint(i))
- }
-
- cmds, err := pipe.Exec(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(N))
-
- for i := 0; i < N; i++ {
- Expect(cmds[i].(*redis.StringCmd).Val()).To(Equal(fmt.Sprint(i)))
- }
- })
- })
-
- It("should Pipeline", func() {
- pipe := client.Pipeline()
- perform(N, func(id int) {
- pipe.Incr(ctx, "key")
- })
-
- cmds, err := pipe.Exec(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(N))
-
- n, err := client.Get(ctx, "key").Int64()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(N)))
- })
-
- It("should TxPipeline", func() {
- pipe := client.TxPipeline()
- perform(N, func(id int) {
- pipe.Incr(ctx, "key")
- })
-
- cmds, err := pipe.Exec(ctx)
- Expect(err).NotTo(HaveOccurred())
- Expect(cmds).To(HaveLen(N))
-
- n, err := client.Get(ctx, "key").Int64()
- Expect(err).NotTo(HaveOccurred())
- Expect(n).To(Equal(int64(N)))
- })
-
PIt("should BLPop", func() {
var received uint32
@@ -289,36 +241,9 @@ var _ = Describe("races", func() {
wg.Wait()
Expect(atomic.LoadUint32(&received)).To(Equal(uint32(C * N)))
})
-
- It("should WithContext", func() {
- perform(C, func(_ int) {
- err := client.WithContext(ctx).Ping(ctx).Err()
- Expect(err).NotTo(HaveOccurred())
- })
- })
-
- It("should abort on context timeout", func() {
- opt := redisClusterOptions()
- client := cluster.newClusterClient(ctx, opt)
-
- ctx, cancel := context.WithCancel(context.Background())
-
- wg := performAsync(C, func(_ int) {
- _, err := client.XRead(ctx, &redis.XReadArgs{
- Streams: []string{"test", "$"},
- Block: 1 * time.Second,
- }).Result()
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(Or(Equal(context.Canceled.Error()), ContainSubstring("operation was canceled")))
- })
-
- time.Sleep(10 * time.Millisecond)
- cancel()
- wg.Wait()
- })
})
-var _ = Describe("cluster races", func() {
+var _ = Describe("cluster races", Label("NonRedisEnterprise"), func() {
var client *redis.ClusterClient
var C, N int
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go
new file mode 100644
index 0000000..d25a0d3
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis.go
@@ -0,0 +1,852 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// Scanner internal/hscan.Scanner exposed interface.
+type Scanner = hscan.Scanner
+
+// Nil reply returned by Redis when key does not exist.
+const Nil = proto.Nil
+
+// SetLogger set custom log
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ DialHook(next DialHook) DialHook
+ ProcessHook(next ProcessHook) ProcessHook
+ ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook
+}
+
+type (
+ DialHook func(ctx context.Context, network, addr string) (net.Conn, error)
+ ProcessHook func(ctx context.Context, cmd Cmder) error
+ ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error
+)
+
+type hooksMixin struct {
+ hooksMu *sync.Mutex
+
+ slice []Hook
+ initial hooks
+ current hooks
+}
+
+func (hs *hooksMixin) initHooks(hooks hooks) {
+ hs.hooksMu = new(sync.Mutex)
+ hs.initial = hooks
+ hs.chain()
+}
+
+type hooks struct {
+ dial DialHook
+ process ProcessHook
+ pipeline ProcessPipelineHook
+ txPipeline ProcessPipelineHook
+}
+
+func (h *hooks) setDefaults() {
+ if h.dial == nil {
+ h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil }
+ }
+ if h.process == nil {
+ h.process = func(ctx context.Context, cmd Cmder) error { return nil }
+ }
+ if h.pipeline == nil {
+ h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+ if h.txPipeline == nil {
+ h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+}
+
+// AddHook is to add a hook to the queue.
+// Hook is a function executed during network connection, command execution, and pipeline,
+// it is a first-in-first-out stack queue (FIFO).
+// You need to execute the next hook in each hook, unless you want to terminate the execution of the command.
+// For example, you added hook-1, hook-2:
+//
+// client.AddHook(hook-1, hook-2)
+//
+// hook-1:
+//
+// func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd Cmder) error {
+// print("hook-1 start")
+// next(ctx, cmd)
+// print("hook-1 end")
+// return nil
+// }
+// }
+//
+// hook-2:
+//
+// func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd redis.Cmder) error {
+// print("hook-2 start")
+// next(ctx, cmd)
+// print("hook-2 end")
+// return nil
+// }
+// }
+//
+// The execution sequence is:
+//
+// hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end
+//
+// Please note: "next(ctx, cmd)" is very important, it will call the next hook,
+// if "next(ctx, cmd)" is not executed, the redis command will not be executed.
+func (hs *hooksMixin) AddHook(hook Hook) {
+ hs.slice = append(hs.slice, hook)
+ hs.chain()
+}
+
+func (hs *hooksMixin) chain() {
+ hs.initial.setDefaults()
+
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ hs.current.dial = hs.initial.dial
+ hs.current.process = hs.initial.process
+ hs.current.pipeline = hs.initial.pipeline
+ hs.current.txPipeline = hs.initial.txPipeline
+
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil {
+ hs.current.dial = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil {
+ hs.current.process = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil {
+ hs.current.pipeline = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil {
+ hs.current.txPipeline = wrapped
+ }
+ }
+}
+
+func (hs *hooksMixin) clone() hooksMixin {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ clone := *hs
+ l := len(clone.slice)
+ clone.slice = clone.slice[:l:l]
+ clone.hooksMu = new(sync.Mutex)
+ return clone
+}
+
+func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmd)
+}
+
+func (hs *hooksMixin) withProcessPipelineHook(
+ ctx context.Context, cmds []Cmder, hook ProcessPipelineHook,
+) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmds)
+}
+
+func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+ return hs.current.dial(ctx, network, addr)
+}
+
+func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error {
+ return hs.current.process(ctx, cmd)
+}
+
+func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.pipeline(ctx, cmds)
+}
+
+func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.txPipeline(ctx, cmds)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ if err := c.initConn(ctx, cn); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ username, password := c.opt.Username, c.opt.Password
+ if c.opt.CredentialsProvider != nil {
+ username, password = c.opt.CredentialsProvider()
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(c.opt, connPool)
+
+ var auth bool
+ protocol := c.opt.Protocol
+ // By default, use RESP3 in current version.
+ if protocol < 2 {
+ protocol = 3
+ }
+
+ // for redis-server versions that do not support the HELLO command,
+ // RESP2 will continue to be used.
+ if err := conn.Hello(ctx, protocol, username, password, "").Err(); err == nil {
+ auth = true
+ } else if !isRedisError(err) {
+ // When the server responds with the RESP protocol and the result is not a normal
+ // execution result of the HELLO command, we consider it to be an indication that
+ // the server does not support the HELLO command.
+ // The server may be a redis-server that does not support the HELLO command,
+ // or it could be DragonflyDB or a third-party redis-proxy. They all respond
+ // with different error string results for unsupported commands, making it
+ // difficult to rely on error strings to determine all results.
+ return err
+ }
+
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if !auth && password != "" {
+ if username != "" {
+ pipe.AuthACL(ctx, username, password)
+ } else {
+ pipe.Auth(ctx, password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ if c.opt.ClientName != "" {
+ pipe.ClientSetName(ctx, c.opt.ClientName)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !c.opt.DisableIndentity {
+ libName := ""
+ libVer := Version()
+ if c.opt.IdentitySuffix != "" {
+ libName = c.opt.IdentitySuffix
+ }
+ p := conn.Pipeline()
+ p.ClientSetInfo(ctx, WithLibraryName(libName))
+ p.ClientSetInfo(ctx, WithLibraryVersion(libVer))
+ _, _ = p.Exec(ctx)
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false, c.opt.Addr) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ var fnErr error
+ defer func() {
+ c.releaseConn(ctx, cn, fnErr)
+ }()
+
+ fnErr = fn(ctx, cn)
+
+ return fnErr
+}
+
+func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ return c.opt.Dialer(ctx, network, addr)
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ retry, err := c._process(ctx, cmd, attempt)
+ if err == nil || !retry {
+ return err
+ }
+
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(0)
+ if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ }); err != nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ return err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+ }
+
+ return false, nil
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ // Enable retries by default to retry dial errors returned by withConn.
+ canRetry := true
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ }); err != nil {
+ return true, err
+ }
+
+ return false, nil
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+ // Retry errors like "LOADING redis is loading the dataset in memory".
+ return cmds[0].Err()
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ }); err != nil {
+ return false, err
+ }
+
+ return false, nil
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse +OK.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ // Parse +QUEUED.
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *baseClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more underlying connections.
+// It's safe for concurrent use by multiple goroutines.
+//
+// Client creates and frees connections automatically; it also maintains a free pool
+// of idle connections. You can control the pool size with Config.PoolSize option.
+type Client struct {
+ *baseClient
+ cmdable
+ hooksMixin
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ c.init()
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return &c
+}
+
+func (c *Client) init() {
+ c.cmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := *c
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ clone.init()
+ return &clone
+}
+
+func (c *Client) Conn() *Conn {
+ return newConn(c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do create a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe Subscribes the client to the specified shard channels.
+// Channels can be omitted to create empty subscription.
+func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooksMixin
+}
+
+func newConn(opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ }
+
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processPipelineHook,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis_test.go
index 095da2d..66d69c7 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/redis_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/redis_test.go
@@ -4,28 +4,33 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"net"
"testing"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
-type redisHookError struct {
- redis.Hook
-}
+type redisHookError struct{}
var _ redis.Hook = redisHookError{}
-func (redisHookError) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- return ctx, nil
+func (redisHookError) DialHook(hook redis.DialHook) redis.DialHook {
+ return hook
+}
+
+func (redisHookError) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ return errors.New("hook error")
+ }
}
-func (redisHookError) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
- return errors.New("hook error")
+func (redisHookError) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return hook
}
func TestHookError(t *testing.T) {
@@ -60,7 +65,11 @@ var _ = Describe("Client", func() {
})
It("should Stringer", func() {
- Expect(client.String()).To(Equal("Redis<:6380 db:15>"))
+ if RECluster {
+ Expect(client.String()).To(Equal(fmt.Sprintf("Redis<:%s db:0>", redisPort)))
+ } else {
+ Expect(client.String()).To(Equal(fmt.Sprintf("Redis<:%s db:15>", redisPort)))
+ }
})
It("supports context", func() {
@@ -71,7 +80,7 @@ var _ = Describe("Client", func() {
Expect(err).To(MatchError("context canceled"))
})
- It("supports WithTimeout", func() {
+ It("supports WithTimeout", Label("NonRedisEnterprise"), func() {
err := client.ClientPause(ctx, time.Second).Err()
Expect(err).NotTo(HaveOccurred())
@@ -136,17 +145,6 @@ var _ = Describe("Client", func() {
Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
})
- It("should close pipeline without closing the client", func() {
- pipeline := client.Pipeline()
- Expect(pipeline.Close()).NotTo(HaveOccurred())
-
- pipeline.Ping(ctx)
- _, err := pipeline.Exec(ctx)
- Expect(err).To(MatchError("redis: client is closed"))
-
- Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
- })
-
It("should close pubsub when client is closed", func() {
pubsub := client.Subscribe(ctx)
Expect(client.Close()).NotTo(HaveOccurred())
@@ -157,13 +155,7 @@ var _ = Describe("Client", func() {
Expect(pubsub.Close()).NotTo(HaveOccurred())
})
- It("should close pipeline when client is closed", func() {
- pipeline := client.Pipeline()
- Expect(client.Close()).NotTo(HaveOccurred())
- Expect(pipeline.Close()).NotTo(HaveOccurred())
- })
-
- It("should select DB", func() {
+ It("should select DB", Label("NonRedisEnterprise"), func() {
db2 := redis.NewClient(&redis.Options{
Addr: redisAddr,
DB: 2,
@@ -182,6 +174,48 @@ var _ = Describe("Client", func() {
Expect(db2.Close()).NotTo(HaveOccurred())
})
+ It("should client setname", func() {
+ opt := redisOptions()
+ opt.ClientName = "hi"
+ db := redis.NewClient(opt)
+
+ defer func() {
+ Expect(db.Close()).NotTo(HaveOccurred())
+ }()
+
+ Expect(db.Ping(ctx).Err()).NotTo(HaveOccurred())
+ val, err := db.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=hi"))
+ })
+
+ It("should client PROTO 2", func() {
+ opt := redisOptions()
+ opt.Protocol = 2
+ db := redis.NewClient(opt)
+
+ defer func() {
+ Expect(db.Close()).NotTo(HaveOccurred())
+ }()
+
+ val, err := db.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ })
+
+ It("should client PROTO 3", func() {
+ opt := redisOptions()
+ db := redis.NewClient(opt)
+
+ defer func() {
+ Expect(db.Close()).NotTo(HaveOccurred())
+ }()
+
+ val, err := db.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ })
+
It("processes custom commands", func() {
cmd := redis.NewCmd(ctx, "PING")
_ = client.Process(ctx, cmd)
@@ -313,9 +347,21 @@ var _ = Describe("Client", func() {
})
It("should Conn", func() {
- err := client.Conn(ctx).Get(ctx, "this-key-does-not-exist").Err()
+ err := client.Conn().Get(ctx, "this-key-does-not-exist").Err()
Expect(err).To(Equal(redis.Nil))
})
+
+ It("should set and scan net.IP", func() {
+ ip := net.ParseIP("192.168.1.1")
+ err := client.Set(ctx, "ip", ip, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var ip2 net.IP
+ err = client.Get(ctx, "ip").Scan(&ip2)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ip2).To(Equal(ip))
+ })
})
var _ = Describe("Client timeout", func() {
@@ -447,3 +493,143 @@ var _ = Describe("Client context cancelation", func() {
Expect(err).To(BeIdenticalTo(context.Canceled))
})
})
+
+var _ = Describe("Conn", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("TxPipeline", Label("NonRedisEnterprise"), func() {
+ tx := client.Conn().TxPipeline()
+ tx.SwapDB(ctx, 0, 2)
+ tx.SwapDB(ctx, 1, 0)
+ _, err := tx.Exec(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
+
+var _ = Describe("Hook", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("fifo", func() {
+ var res []string
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-1-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-1-process-end")
+ return err
+ }
+ },
+ })
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-2-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-2-process-end")
+ return err
+ }
+ },
+ })
+
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(res).To(Equal([]string{
+ "hook-1-process-start",
+ "hook-2-process-start",
+ "hook-2-process-end",
+ "hook-1-process-end",
+ }))
+ })
+
+ It("wrapped error in a hook", func() {
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ if err := hook(ctx, cmd); err != nil {
+ return fmt.Errorf("wrapped error: %w", err)
+ }
+ return nil
+ }
+ },
+ })
+ client.ScriptFlush(ctx)
+
+ script := redis.NewScript(`return 'Script and hook'`)
+
+ cmd := script.Run(ctx, client, nil)
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal("Script and hook"))
+ })
+})
+
+var _ = Describe("Hook with MinIdleConns", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ options := redisOptions()
+ options.MinIdleConns = 1
+ client = redis.NewClient(options)
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("fifo", func() {
+ var res []string
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-1-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-1-process-end")
+ return err
+ }
+ },
+ })
+ client.AddHook(&hook{
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ res = append(res, "hook-2-process-start")
+ err := hook(ctx, cmd)
+ res = append(res, "hook-2-process-end")
+ return err
+ }
+ },
+ })
+
+ err := client.Ping(ctx).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(res).To(Equal([]string{
+ "hook-1-process-start",
+ "hook-2-process-start",
+ "hook-2-process-end",
+ "hook-1-process-end",
+ }))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/result.go
index 24cfd49..cfd4cf9 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/result.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/result.go
@@ -82,17 +82,17 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
return &cmd
}
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
-func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
- var cmd StringStringMapCmd
+// NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing.
+func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd {
+ var cmd MapStringStringCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
}
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
-func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
- var cmd StringIntMapCmd
+// NewMapStringIntCmdResult returns a MapStringIntCmd initialised with val and err for testing.
+func NewMapStringIntCmdResult(val map[string]int64, err error) *MapStringIntCmd {
+ var cmd MapStringIntCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
@@ -114,7 +114,7 @@ func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
return &cmd
}
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
var cmd ZWithKeyCmd
cmd.val = val
@@ -178,3 +178,11 @@ func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
cmd.SetErr(err)
return &cmd
}
+
+// NewXPendingResult returns a XPendingCmd initialised with val and err for testing.
+func NewXPendingResult(val *XPending, err error) *XPendingCmd {
+ var cmd XPendingCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring.go
index 4df00fc..4ae0054 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring.go
@@ -12,12 +12,12 @@ import (
"time"
"github.com/cespare/xxhash/v2"
- rendezvous "github.com/dgryski/go-rendezvous" //nolint
+ "github.com/dgryski/go-rendezvous" //nolint
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
)
var errRingShardsDown = errors.New("redis: all ring shards are down")
@@ -48,8 +48,11 @@ type RingOptions struct {
// Map of name => host:port addresses of ring shards.
Addrs map[string]string
- // NewClient creates a shard client with provided name and options.
- NewClient func(name string, opt *Options) *Client
+ // NewClient creates a shard client with provided options.
+ NewClient func(opt *Options) *Client
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
// Frequency of PING commands sent to check shards availability.
// Shard is considered down after 3 subsequent failed checks.
@@ -67,6 +70,7 @@ type RingOptions struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
DB int
@@ -75,27 +79,32 @@ type RingOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
Limiter Limiter
+
+ DisableIndentity bool
+ IdentitySuffix string
}
func (opt *RingOptions) init() {
if opt.NewClient == nil {
- opt.NewClient = func(name string, opt *Options) *Client {
+ opt.NewClient = func(opt *Options) *Client {
return NewClient(opt)
}
}
@@ -129,29 +138,36 @@ func (opt *RingOptions) init() {
func (opt *RingOptions) clientOptions() *Options {
return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
MaxRetries: -1,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
Limiter: opt.Limiter,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
@@ -160,14 +176,16 @@ func (opt *RingOptions) clientOptions() *Options {
type ringShard struct {
Client *Client
down int32
+ addr string
}
-func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+func newRingShard(opt *RingOptions, addr string) *ringShard {
clopt := opt.clientOptions()
clopt.Addr = addr
return &ringShard{
- Client: opt.NewClient(name, clopt),
+ Client: opt.NewClient(clopt),
+ addr: addr,
}
}
@@ -208,161 +226,237 @@ func (shard *ringShard) Vote(up bool) bool {
//------------------------------------------------------------------------------
-type ringShards struct {
+type ringSharding struct {
opt *RingOptions
- mu sync.RWMutex
- hash ConsistentHash
- shards map[string]*ringShard // read only
- list []*ringShard // read only
- numShard int
- closed bool
+ mu sync.RWMutex
+ shards *ringShards
+ closed bool
+ hash ConsistentHash
+ numShard int
+ onNewNode []func(rdb *Client)
+
+ // ensures exclusive access to SetAddrs so there is no need
+ // to hold mu for the duration of potentially long shard creation
+ setAddrsMu sync.Mutex
+}
+
+type ringShards struct {
+ m map[string]*ringShard
+ list []*ringShard
+}
+
+func newRingSharding(opt *RingOptions) *ringSharding {
+ c := &ringSharding{
+ opt: opt,
+ }
+ c.SetAddrs(opt.Addrs)
+
+ return c
+}
+
+func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
}
-func newRingShards(opt *RingOptions) *ringShards {
- shards := make(map[string]*ringShard, len(opt.Addrs))
- list := make([]*ringShard, 0, len(shards))
+// SetAddrs replaces the shards in use, such that you can increase and
+// decrease number of shards, that you use. It will reuse shards that
+// existed before and close the ones that will not be used anymore.
+func (c *ringSharding) SetAddrs(addrs map[string]string) {
+ c.setAddrsMu.Lock()
+ defer c.setAddrsMu.Unlock()
- for name, addr := range opt.Addrs {
- shard := newRingShard(opt, name, addr)
- shards[name] = shard
+ cleanup := func(shards map[string]*ringShard) {
+ for addr, shard := range shards {
+ if err := shard.Client.Close(); err != nil {
+ internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
+ }
+ }
+ }
- list = append(list, shard)
+ c.mu.RLock()
+ if c.closed {
+ c.mu.RUnlock()
+ return
}
+ existing := c.shards
+ c.mu.RUnlock()
- c := &ringShards{
- opt: opt,
+ shards, created, unused := c.newRingShards(addrs, existing)
- shards: shards,
- list: list,
+ c.mu.Lock()
+ if c.closed {
+ cleanup(created)
+ c.mu.Unlock()
+ return
}
- c.rebalance()
+ c.shards = shards
+ c.rebalanceLocked()
+ c.mu.Unlock()
- return c
+ cleanup(unused)
}
-func (c *ringShards) List() []*ringShard {
+func (c *ringSharding) newRingShards(
+ addrs map[string]string, existing *ringShards,
+) (shards *ringShards, created, unused map[string]*ringShard) {
+ shards = &ringShards{m: make(map[string]*ringShard, len(addrs))}
+ created = make(map[string]*ringShard) // indexed by addr
+ unused = make(map[string]*ringShard) // indexed by addr
+
+ if existing != nil {
+ for _, shard := range existing.list {
+ unused[shard.addr] = shard
+ }
+ }
+
+ for name, addr := range addrs {
+ if shard, ok := unused[addr]; ok {
+ shards.m[name] = shard
+ delete(unused, addr)
+ } else {
+ shard := newRingShard(c.opt, addr)
+ shards.m[name] = shard
+ created[addr] = shard
+
+ for _, fn := range c.onNewNode {
+ fn(shard.Client)
+ }
+ }
+ }
+
+ for _, shard := range shards.m {
+ shards.list = append(shards.list, shard)
+ }
+
+ return
+}
+
+func (c *ringSharding) List() []*ringShard {
var list []*ringShard
c.mu.RLock()
if !c.closed {
- list = c.list
+ list = c.shards.list
}
c.mu.RUnlock()
return list
}
-func (c *ringShards) Hash(key string) string {
+func (c *ringSharding) Hash(key string) string {
key = hashtag.Key(key)
var hash string
c.mu.RLock()
+ defer c.mu.RUnlock()
+
if c.numShard > 0 {
hash = c.hash.Get(key)
}
- c.mu.RUnlock()
return hash
}
-func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+func (c *ringSharding) GetByKey(key string) (*ringShard, error) {
key = hashtag.Key(key)
c.mu.RLock()
+ defer c.mu.RUnlock()
if c.closed {
- c.mu.RUnlock()
return nil, pool.ErrClosed
}
if c.numShard == 0 {
- c.mu.RUnlock()
return nil, errRingShardsDown
}
- hash := c.hash.Get(key)
- if hash == "" {
- c.mu.RUnlock()
+ shardName := c.hash.Get(key)
+ if shardName == "" {
return nil, errRingShardsDown
}
-
- shard := c.shards[hash]
- c.mu.RUnlock()
-
- return shard, nil
+ return c.shards.m[shardName], nil
}
-func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+func (c *ringSharding) GetByName(shardName string) (*ringShard, error) {
if shardName == "" {
return c.Random()
}
c.mu.RLock()
- shard := c.shards[shardName]
- c.mu.RUnlock()
- return shard, nil
+ defer c.mu.RUnlock()
+
+ return c.shards.m[shardName], nil
}
-func (c *ringShards) Random() (*ringShard, error) {
+func (c *ringSharding) Random() (*ringShard, error) {
return c.GetByKey(strconv.Itoa(rand.Int()))
}
-// heartbeat monitors state of each shard in the ring.
-func (c *ringShards) Heartbeat(frequency time.Duration) {
+// Heartbeat monitors state of each shard in the ring.
+func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
- ctx := context.Background()
- for range ticker.C {
- var rebalance bool
-
- for _, shard := range c.List() {
- err := shard.Client.Ping(ctx).Err()
- isUp := err == nil || err == pool.ErrPoolTimeout
- if shard.Vote(isUp) {
- internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
- rebalance = true
+ for {
+ select {
+ case <-ticker.C:
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(ctx, "ring shard state changed: %s", shard)
+ rebalance = true
+ }
}
- }
- if rebalance {
- c.rebalance()
+ if rebalance {
+ c.mu.Lock()
+ c.rebalanceLocked()
+ c.mu.Unlock()
+ }
+ case <-ctx.Done():
+ return
}
}
}
-// rebalance removes dead shards from the Ring.
-func (c *ringShards) rebalance() {
- c.mu.RLock()
- shards := c.shards
- c.mu.RUnlock()
+// rebalanceLocked removes dead shards from the Ring.
+// Requires c.mu locked.
+func (c *ringSharding) rebalanceLocked() {
+ if c.closed {
+ return
+ }
+ if c.shards == nil {
+ return
+ }
- liveShards := make([]string, 0, len(shards))
+ liveShards := make([]string, 0, len(c.shards.m))
- for name, shard := range shards {
+ for name, shard := range c.shards.m {
if shard.IsUp() {
liveShards = append(liveShards, name)
}
}
- hash := c.opt.NewConsistentHash(liveShards)
-
- c.mu.Lock()
- c.hash = hash
+ c.hash = c.opt.NewConsistentHash(liveShards)
c.numShard = len(liveShards)
- c.mu.Unlock()
}
-func (c *ringShards) Len() int {
+func (c *ringSharding) Len() int {
c.mu.RLock()
- l := c.numShard
- c.mu.RUnlock()
- return l
+ defer c.mu.RUnlock()
+
+ return c.numShard
}
-func (c *ringShards) Close() error {
+func (c *ringSharding) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
@@ -372,26 +466,22 @@ func (c *ringShards) Close() error {
c.closed = true
var firstErr error
- for _, shard := range c.shards {
+
+ for _, shard := range c.shards.list {
if err := shard.Client.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
+
c.hash = nil
c.shards = nil
- c.list = nil
+ c.numShard = 0
return firstErr
}
//------------------------------------------------------------------------------
-type ring struct {
- opt *RingOptions
- shards *ringShards
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
// Ring is a Redis client that uses consistent hashing to distribute
// keys across multiple Redis servers (shards). It's safe for
// concurrent use by multiple goroutines.
@@ -407,47 +497,49 @@ type ring struct {
// and can tolerate losing data when one of the servers dies.
// Otherwise you should use Redis Cluster.
type Ring struct {
- *ring
cmdable
- hooks
- ctx context.Context
+ hooksMixin
+
+ opt *RingOptions
+ sharding *ringSharding
+ cmdsInfoCache *cmdsInfoCache
+ heartbeatCancelFn context.CancelFunc
}
func NewRing(opt *RingOptions) *Ring {
opt.init()
+ hbCtx, hbCancel := context.WithCancel(context.Background())
+
ring := Ring{
- ring: &ring{
- opt: opt,
- shards: newRingShards(opt),
- },
- ctx: context.Background(),
+ opt: opt,
+ sharding: newRingSharding(opt),
+ heartbeatCancelFn: hbCancel,
}
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
ring.cmdable = ring.Process
- go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+ ring.initHooks(hooks{
+ process: ring.process,
+ pipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, false)
+ },
+ txPipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, true)
+ },
+ })
- return &ring
-}
+ go ring.sharding.Heartbeat(hbCtx, opt.HeartbeatFrequency)
-func (c *Ring) Context() context.Context {
- return c.ctx
+ return &ring
}
-func (c *Ring) WithContext(ctx context.Context) *Ring {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
+func (c *Ring) SetAddrs(addrs map[string]string) {
+ c.sharding.SetAddrs(addrs)
}
-// Do creates a Cmd from the args and processes the cmd.
+// Do create a Cmd from the args and processes the cmd.
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
@@ -455,7 +547,9 @@ func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
}
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
// Options returns read-only Options that were used to create the client.
@@ -469,7 +563,7 @@ func (c *Ring) retryBackoff(attempt int) time.Duration {
// PoolStats returns accumulated connection pool stats.
func (c *Ring) PoolStats() *PoolStats {
- shards := c.shards.List()
+ shards := c.sharding.List()
var acc PoolStats
for _, shard := range shards {
s := shard.Client.connPool.Stats()
@@ -484,7 +578,7 @@ func (c *Ring) PoolStats() *PoolStats {
// Len returns the current number of shards in the ring.
func (c *Ring) Len() int {
- return c.shards.Len()
+ return c.sharding.Len()
}
// Subscribe subscribes the client to the specified channels.
@@ -493,7 +587,7 @@ func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
panic("at least one channel is required")
}
- shard, err := c.shards.GetByKey(channels[0])
+ shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
@@ -507,7 +601,7 @@ func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
panic("at least one channel is required")
}
- shard, err := c.shards.GetByKey(channels[0])
+ shard, err := c.sharding.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
@@ -515,13 +609,30 @@ func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
return shard.Client.PSubscribe(ctx, channels...)
}
+// SSubscribe Subscribes the client to the specified shard channels.
+func (c *Ring) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.SSubscribe(ctx, channels...)
+}
+
+func (c *Ring) OnNewNode(fn func(rdb *Client)) {
+ c.sharding.OnNewNode(fn)
+}
+
// ForEachShard concurrently calls the fn on each live shard in the ring.
// It returns the first error if any.
func (c *Ring) ForEachShard(
ctx context.Context,
fn func(ctx context.Context, client *Client) error,
) error {
- shards := c.shards.List()
+ shards := c.sharding.List()
var wg sync.WaitGroup
errCh := make(chan error, 1)
for _, shard := range shards {
@@ -552,7 +663,7 @@ func (c *Ring) ForEachShard(
}
func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
- shards := c.shards.List()
+ shards := c.sharding.List()
var firstErr error
for _, shard := range shards {
cmdsInfo, err := shard.Client.Command(ctx).Result()
@@ -569,26 +680,13 @@ func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
return nil, firstErr
}
-func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
- if err != nil {
- return nil
- }
- info := cmdsInfo[name]
- if info == nil {
- internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
- }
- return info
-}
-
func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- pos := cmdFirstKeyPos(cmd, cmdInfo)
+ pos := cmdFirstKeyPos(cmd)
if pos == 0 {
- return c.shards.Random()
+ return c.sharding.Random()
}
firstKey := cmd.stringArg(pos)
- return c.shards.GetByKey(firstKey)
+ return c.sharding.GetByKey(firstKey)
}
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
@@ -619,47 +717,41 @@ func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
func (c *Ring) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
+ exec: pipelineExecer(c.processPipelineHook),
}
pipe.init()
return &pipe
}
-func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, false)
- })
-}
-
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(ctx, fn)
}
func (c *Ring) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
}
pipe.init()
return &pipe
}
-func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, true)
- })
-}
-
func (c *Ring) generalProcessPipeline(
ctx context.Context, cmds []Cmder, tx bool,
) error {
+ if tx {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+ }
+
cmdsMap := make(map[string][]Cmder)
+
for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd))
if hash != "" {
- hash = c.shards.Hash(hash)
+ hash = c.sharding.Hash(hash)
}
cmdsMap[hash] = append(cmdsMap[hash], cmd)
}
@@ -670,7 +762,19 @@ func (c *Ring) generalProcessPipeline(
go func(hash string, cmds []Cmder) {
defer wg.Done()
- _ = c.processShardPipeline(ctx, hash, cmds, tx)
+ // TODO: retry?
+ shard, err := c.sharding.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return
+ }
+
+ if tx {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = shard.Client.processTxPipelineHook(ctx, cmds)
+ } else {
+ _ = shard.Client.processPipelineHook(ctx, cmds)
+ }
}(hash, cmds)
}
@@ -678,31 +782,16 @@ func (c *Ring) generalProcessPipeline(
return cmdsFirstErr(cmds)
}
-func (c *Ring) processShardPipeline(
- ctx context.Context, hash string, cmds []Cmder, tx bool,
-) error {
- // TODO: retry?
- shard, err := c.shards.GetByName(hash)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- if tx {
- return shard.Client.processTxPipeline(ctx, cmds)
- }
- return shard.Client.processPipeline(ctx, cmds)
-}
-
func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
if len(keys) == 0 {
return fmt.Errorf("redis: Watch requires at least one key")
}
var shards []*ringShard
+
for _, key := range keys {
if key != "" {
- shard, err := c.shards.GetByKey(hashtag.Key(key))
+ shard, err := c.sharding.GetByKey(hashtag.Key(key))
if err != nil {
return err
}
@@ -732,5 +821,7 @@ func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) er
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
- return c.shards.Close()
+ c.heartbeatCancelFn()
+
+ return c.sharding.Close()
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring_test.go
index 03a49fd..b3017f6 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/ring_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/ring_test.go
@@ -9,12 +9,43 @@ import (
"sync"
"time"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
+var _ = Describe("Redis Ring PROTO 2", func() {
+ const heartbeat = 100 * time.Millisecond
+
+ var ring *redis.Ring
+
+ BeforeEach(func() {
+ opt := redisRingOptions()
+ opt.Protocol = 2
+ opt.HeartbeatFrequency = heartbeat
+ ring = redis.NewRing(opt)
+
+ err := ring.ForEachShard(ctx, func(ctx context.Context, cl *redis.Client) error {
+ return cl.FlushDB(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should ring PROTO 2", func() {
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ return nil
+ })
+ })
+})
+
var _ = Describe("Redis Ring", func() {
const heartbeat = 100 * time.Millisecond
@@ -29,6 +60,7 @@ var _ = Describe("Redis Ring", func() {
BeforeEach(func() {
opt := redisRingOptions()
+ opt.ClientName = "ring_hi"
opt.HeartbeatFrequency = heartbeat
ring = redis.NewRing(opt)
@@ -50,6 +82,29 @@ var _ = Describe("Redis Ring", func() {
Expect(err).To(MatchError("context canceled"))
})
+ It("should ring client setname", func() {
+ err := ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ return c.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=ring_hi"))
+ return nil
+ })
+ })
+
+ It("should ring PROTO 3", func() {
+ _ = ring.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ return nil
+ })
+ })
+
It("distributes keys", func() {
setRingKeys()
@@ -113,7 +168,81 @@ var _ = Describe("Redis Ring", func() {
Expect(ringShard2.Info(ctx, "keyspace").Val()).To(ContainSubstring("keys=100"))
})
+ Describe("[new] dynamic setting ring shards", func() {
+ It("downscale shard and check reuse shard, upscale shard and check reuse", func() {
+ Expect(ring.Len(), 2)
+
+ wantShard := ring.ShardByName("ringShardOne")
+ ring.SetAddrs(map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ })
+ Expect(ring.Len(), 1)
+ gotShard := ring.ShardByName("ringShardOne")
+ Expect(gotShard).To(BeIdenticalTo(wantShard))
+
+ ring.SetAddrs(map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ "ringShardTwo": ":" + ringShard2Port,
+ })
+ Expect(ring.Len(), 2)
+ gotShard = ring.ShardByName("ringShardOne")
+ Expect(gotShard).To(BeIdenticalTo(wantShard))
+ })
+
+ It("uses 3 shards after setting it to 3 shards", func() {
+ Expect(ring.Len(), 2)
+
+ shardName1 := "ringShardOne"
+ shardAddr1 := ":" + ringShard1Port
+ wantShard1 := ring.ShardByName(shardName1)
+ shardName2 := "ringShardTwo"
+ shardAddr2 := ":" + ringShard2Port
+ wantShard2 := ring.ShardByName(shardName2)
+ shardName3 := "ringShardThree"
+ shardAddr3 := ":" + ringShard3Port
+
+ ring.SetAddrs(map[string]string{
+ shardName1: shardAddr1,
+ shardName2: shardAddr2,
+ shardName3: shardAddr3,
+ })
+ Expect(ring.Len(), 3)
+ gotShard1 := ring.ShardByName(shardName1)
+ gotShard2 := ring.ShardByName(shardName2)
+ gotShard3 := ring.ShardByName(shardName3)
+ Expect(gotShard1).To(BeIdenticalTo(wantShard1))
+ Expect(gotShard2).To(BeIdenticalTo(wantShard2))
+ Expect(gotShard3).ToNot(BeNil())
+
+ ring.SetAddrs(map[string]string{
+ shardName1: shardAddr1,
+ shardName2: shardAddr2,
+ })
+ Expect(ring.Len(), 2)
+ gotShard1 = ring.ShardByName(shardName1)
+ gotShard2 = ring.ShardByName(shardName2)
+ gotShard3 = ring.ShardByName(shardName3)
+ Expect(gotShard1).To(BeIdenticalTo(wantShard1))
+ Expect(gotShard2).To(BeIdenticalTo(wantShard2))
+ Expect(gotShard3).To(BeNil())
+ })
+ })
Describe("pipeline", func() {
+ It("doesn't panic closed ring, returns error", func() {
+ pipe := ring.Pipeline()
+ for i := 0; i < 3; i++ {
+ err := pipe.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(ring.Close()).NotTo(HaveOccurred())
+
+ Expect(func() {
+ _, execErr := pipe.Exec(ctx)
+ Expect(execErr).To(HaveOccurred())
+ }).NotTo(Panic())
+ })
+
It("distributes keys", func() {
pipe := ring.Pipeline()
for i := 0; i < 100; i++ {
@@ -123,7 +252,6 @@ var _ = Describe("Redis Ring", func() {
cmds, err := pipe.Exec(ctx)
Expect(err).NotTo(HaveOccurred())
Expect(cmds).To(HaveLen(100))
- Expect(pipe.Close()).NotTo(HaveOccurred())
for _, cmd := range cmds {
Expect(cmd.Err()).NotTo(HaveOccurred())
@@ -176,7 +304,8 @@ var _ = Describe("Redis Ring", func() {
Describe("new client callback", func() {
It("can be initialized with a new client callback", func() {
opts := redisRingOptions()
- opts.NewClient = func(name string, opt *redis.Options) *redis.Client {
+ opts.NewClient = func(opt *redis.Options) *redis.Client {
+ opt.Username = "username1"
opt.Password = "password1"
return redis.NewClient(opt)
}
@@ -184,7 +313,7 @@ var _ = Describe("Redis Ring", func() {
err := ring.Ping(ctx).Err()
Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("ERR AUTH"))
+ Expect(err.Error()).To(ContainSubstring("WRONGPASS"))
})
})
@@ -203,29 +332,35 @@ var _ = Describe("Redis Ring", func() {
var stack []string
ring.AddHook(&hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "ring.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "ring.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcess")
+
+ return err
+ }
},
})
ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
shard.AddHook(&hook{
- beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
- Expect(cmd.String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcess")
- return ctx, nil
- },
- afterProcess: func(ctx context.Context, cmd redis.Cmder) error {
- Expect(cmd.String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcess")
- return nil
+ processHook: func(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ Expect(cmd.String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcess")
+
+ err := hook(ctx, cmd)
+
+ Expect(cmd.String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcess")
+
+ return err
+ }
},
})
return nil
@@ -248,33 +383,39 @@ var _ = Describe("Redis Ring", func() {
var stack []string
ring.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "ring.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "ring.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "ring.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "ring.AfterProcessPipeline")
+
+ return err
+ }
},
})
ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
shard.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
@@ -300,33 +441,43 @@ var _ = Describe("Redis Ring", func() {
var stack []string
ring.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: "))
- stack = append(stack, "ring.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
- Expect(cmds).To(HaveLen(1))
- Expect(cmds[0].String()).To(Equal("ping: PONG"))
- stack = append(stack, "ring.AfterProcessPipeline")
- return nil
- },
- })
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ defer GinkgoRecover()
- ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
- shard.AddHook(&hook{
- beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
Expect(cmds).To(HaveLen(3))
Expect(cmds[1].String()).To(Equal("ping: "))
- stack = append(stack, "shard.BeforeProcessPipeline")
- return ctx, nil
- },
- afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error {
+ stack = append(stack, "ring.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
Expect(cmds).To(HaveLen(3))
Expect(cmds[1].String()).To(Equal("ping: PONG"))
- stack = append(stack, "shard.AfterProcessPipeline")
- return nil
+ stack = append(stack, "ring.AfterProcessPipeline")
+
+ return err
+ }
+ },
+ })
+
+ ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+ shard.AddHook(&hook{
+ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ defer GinkgoRecover()
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: "))
+ stack = append(stack, "shard.BeforeProcessPipeline")
+
+ err := hook(ctx, cmds)
+
+ Expect(cmds).To(HaveLen(3))
+ Expect(cmds[1].String()).To(Equal("ping: PONG"))
+ stack = append(stack, "shard.AfterProcessPipeline")
+
+ return err
+ }
},
})
return nil
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/script.go
index 5cab18d..626ab03 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/script.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/script.go
@@ -5,12 +5,13 @@ import (
"crypto/sha1"
"encoding/hex"
"io"
- "strings"
)
type Scripter interface {
Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
ScriptLoad(ctx context.Context, script string) *StringCmd
}
@@ -50,16 +51,34 @@ func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...in
return c.Eval(ctx, s.src, keys, args...)
}
+func (s *Script) EvalRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalRO(ctx, s.src, keys, args...)
+}
+
func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalSha(ctx, s.hash, keys, args...)
}
+func (s *Script) EvalShaRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalShaRO(ctx, s.hash, keys, args...)
+}
+
// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalSha(ctx, c, keys, args...)
- if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
return s.Eval(ctx, c, keys, args...)
}
return r
}
+
+// RunRO optimistically uses EVALSHA_RO to run the script. If script does not exist
+// it is retried using EVAL_RO.
+func (s *Script) RunRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalShaRO(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.EvalRO(ctx, c, keys, args...)
+ }
+ return r
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go
new file mode 100644
index 0000000..af9c339
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripting_commands.go
@@ -0,0 +1,215 @@
+package redis
+
+import "context"
+
+type ScriptingFunctionsCmdable interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ FunctionLoad(ctx context.Context, code string) *StringCmd
+ FunctionLoadReplace(ctx context.Context, code string) *StringCmd
+ FunctionDelete(ctx context.Context, libName string) *StringCmd
+ FunctionFlush(ctx context.Context) *StringCmd
+ FunctionKill(ctx context.Context) *StringCmd
+ FunctionFlushAsync(ctx context.Context) *StringCmd
+ FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd
+ FunctionDump(ctx context.Context) *StringCmd
+ FunctionRestore(ctx context.Context, libDump string) *StringCmd
+ FunctionStats(ctx context.Context) *FunctionStatsCmd
+ FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+}
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval", script, keys, args...)
+}
+
+func (c cmdable) EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval_ro", script, keys, args...)
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha", sha1, keys, args...)
+}
+
+func (c cmdable) EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha_ro", sha1, keys, args...)
+}
+
+func (c cmdable) eval(ctx context.Context, name, payload string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = name
+ cmdArgs[1] = payload
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+
+ // it is possible that only args exist without a key.
+ // rdb.eval(ctx, eval, script, nil, arg1, arg2)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------
+
+// FunctionListQuery is used with FunctionList to query for Redis libraries
+//
+// LibraryNamePattern - Use an empty string to get all libraries.
+// - Use a glob-style pattern to match multiple libraries with a matching name
+// - Use a library's full name to match a single library
+// WithCode - If true, it will return the code of the library
+type FunctionListQuery struct {
+ LibraryNamePattern string
+ WithCode bool
+}
+
+func (c cmdable) FunctionLoad(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionLoadReplace(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", "replace", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDelete(ctx context.Context, libName string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "delete", libName)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlush(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionKill(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlushAsync(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd {
+ args := make([]interface{}, 2, 5)
+ args[0] = "function"
+ args[1] = "list"
+ if q.LibraryNamePattern != "" {
+ args = append(args, "libraryname", q.LibraryNamePattern)
+ }
+ if q.WithCode {
+ args = append(args, "withcode")
+ }
+ cmd := NewFunctionListCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDump(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "dump")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionRestore(ctx context.Context, libDump string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "restore", libDump)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionStats(ctx context.Context) *FunctionStatsCmd {
+ cmd := NewFunctionStatsCmd(ctx, "function", "stats")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FCallRo this function simply calls FCallRO,
+// Deprecated: to maintain convention FCallRO.
+func (c cmdable) FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ return c.FCallRO(ctx, function, keys, args...)
+}
+
+func (c cmdable) FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall_ro", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func fcallArgs(command string, function string, keys []string, args ...interface{}) []interface{} {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = command
+ cmdArgs[1] = function
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+
+ cmdArgs = append(cmdArgs, args...)
+ return cmdArgs
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/bump_deps.sh
index f294c4f..f294c4f 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/bump_deps.sh
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/bump_deps.sh
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/release.sh
index 2e78be6..cd4ddee 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/release.sh
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/release.sh
@@ -48,14 +48,15 @@ PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
for dir in $PACKAGE_DIRS
do
printf "${dir}: go get -u && go mod tidy\n"
- (cd ./${dir} && go get -u && go mod tidy)
+ #(cd ./${dir} && go get -u && go mod tidy -compat=1.18)
done
for dir in $PACKAGE_DIRS
do
sed --in-place \
- "s/go-redis\/redis\([^ ]*\) v.*/go-redis\/redis\1 ${TAG}/" "${dir}/go.mod"
- (cd ./${dir} && go get -u && go mod tidy)
+ "s/redis\/go-redis\([^ ]*\) v.*/redis\/go-redis\1 ${TAG}/" "${dir}/go.mod"
+ #(cd ./${dir} && go get -u && go mod tidy -compat=1.18)
+ (cd ./${dir} && go mod tidy -compat=1.18)
done
sed --in-place "s/\(return \)\"[^\"]*\"/\1\"${TAG#v}\"/" ./version.go
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/tag.sh
index 121f00e..121f00e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/scripts/tag.sh
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/scripts/tag.sh
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel.go
index ec6221d..188f884 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel.go
@@ -9,9 +9,9 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
)
//------------------------------------------------------------------------------
@@ -24,6 +24,9 @@ type FailoverOptions struct {
// A seed list of host:port addresses of sentinel nodes.
SentinelAddrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// If specified with SentinelPassword, enables ACL-based authentication (via
// AUTH <user> <pass>).
SentinelUsername string
@@ -32,25 +35,26 @@ type FailoverOptions struct {
// authentication.
SentinelPassword string
- // Allows routing read-only commands to the closest master or slave node.
+ // Allows routing read-only commands to the closest master or replica node.
// This option only works with NewFailoverClusterClient.
RouteByLatency bool
- // Allows routing read-only commands to the random master or slave node.
+ // Allows routing read-only commands to the random master or replica node.
// This option only works with NewFailoverClusterClient.
RouteRandomly bool
- // Route all commands to slave read-only nodes.
- SlaveOnly bool
+ // Route all commands to replica read-only nodes.
+ ReplicaOnly bool
- // Use slaves disconnected with master when cannot get connected slaves
- // Now, this option only works in RandomSlaveAddr function.
- UseDisconnectedSlaves bool
+ // Use replicas disconnected with master when cannot get connected replicas
+ // Now, this option only works in RandomReplicaAddr function.
+ UseDisconnectedReplicas bool
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
DB int
@@ -59,31 +63,37 @@ type FailoverOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
+
+ DisableIndentity bool
+ IdentitySuffix string
}
func (opt *FailoverOptions) clientOptions() *Options {
return &Options{
- Addr: "FailoverClient",
+ Addr: "FailoverClient",
+ ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
DB: opt.DB,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@@ -91,25 +101,31 @@ func (opt *FailoverOptions) clientOptions() *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
return &Options{
- Addr: addr,
+ Addr: addr,
+ ClientName: opt.ClientName,
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
@@ -122,27 +138,35 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
return &ClusterOptions{
+ ClientName: opt.ClientName,
+
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
+ Protocol: opt.Protocol,
Username: opt.Username,
Password: opt.Password,
@@ -154,19 +178,24 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
}
}
@@ -194,10 +223,21 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
}
opt := failoverOpt.clientOptions()
- opt.Dialer = masterSlaveDialer(failover)
+ opt.Dialer = masterReplicaDialer(failover)
opt.init()
- connPool := newConnPool(opt)
+ var connPool *pool.ConnPool
+
+ rdb := &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ rdb.init()
+
+ connPool = newConnPool(opt, rdb.dialHook)
+ rdb.connPool = connPool
+ rdb.onClose = failover.Close
failover.mu.Lock()
failover.onFailover = func(ctx context.Context, addr string) {
@@ -207,25 +247,18 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
}
failover.mu.Unlock()
- c := Client{
- baseClient: newBaseClient(opt, connPool),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
- c.onClose = failover.Close
-
- return &c
+ return rdb
}
-func masterSlaveDialer(
+func masterReplicaDialer(
failover *sentinelFailover,
) func(ctx context.Context, network, addr string) (net.Conn, error) {
return func(ctx context.Context, network, _ string) (net.Conn, error) {
var addr string
var err error
- if failover.opt.SlaveOnly {
- addr, err = failover.RandomSlaveAddr(ctx)
+ if failover.opt.ReplicaOnly {
+ addr, err = failover.RandomReplicaAddr(ctx)
} else {
addr, err = failover.MasterAddr(ctx)
if err == nil {
@@ -255,37 +288,30 @@ func masterSlaveDialer(
// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
*baseClient
- hooks
- ctx context.Context
+ hooksMixin
}
func NewSentinelClient(opt *Options) *SentinelClient {
opt.init()
c := &SentinelClient{
baseClient: &baseClient{
- opt: opt,
- connPool: newConnPool(opt),
+ opt: opt,
},
- ctx: context.Background(),
}
- return c
-}
-func (c *SentinelClient) Context() context.Context {
- return c.ctx
-}
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ })
+ c.connPool = newConnPool(opt, c.dialHook)
-func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.ctx = ctx
- return &clone
+ return c
}
func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
func (c *SentinelClient) pubSub() *PubSub {
@@ -335,8 +361,8 @@ func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *
return cmd
}
-func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "sentinels", name)
_ = c.Process(ctx, cmd)
return cmd
}
@@ -351,7 +377,7 @@ func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
// Reset resets all the masters with matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
-// (including a failover in progress), and removes every slave and sentinel
+// (including a failover in progress), and removes every replica and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
@@ -368,8 +394,8 @@ func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
}
// Master shows the state and info of the specified master.
-func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+func (c *SentinelClient) Master(ctx context.Context, name string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "sentinel", "master", name)
_ = c.Process(ctx, cmd)
return cmd
}
@@ -381,9 +407,9 @@ func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
return cmd
}
-// Slaves shows a list of slaves for the specified master and their state.
-func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+// Replicas shows a list of replicas for the specified master and their state.
+func (c *SentinelClient) Replicas(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "replicas", name)
_ = c.Process(ctx, cmd)
return cmd
}
@@ -460,18 +486,18 @@ func (c *sentinelFailover) closeSentinel() error {
return firstErr
}
-func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+func (c *sentinelFailover) RandomReplicaAddr(ctx context.Context) (string, error) {
if c.opt == nil {
return "", errors.New("opt is nil")
}
- addresses, err := c.slaveAddrs(ctx, false)
+ addresses, err := c.replicaAddrs(ctx, false)
if err != nil {
return "", err
}
- if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
- addresses, err = c.slaveAddrs(ctx, true)
+ if len(addresses) == 0 && c.opt.UseDisconnectedReplicas {
+ addresses, err = c.replicaAddrs(ctx, true)
if err != nil {
return "", err
}
@@ -489,8 +515,15 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
c.mu.RUnlock()
if sentinel != nil {
- addr := c.getMasterAddr(ctx, sentinel)
- if addr != "" {
+ addr, err := c.getMasterAddr(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
return addr, nil
}
}
@@ -499,11 +532,18 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
defer c.mu.Unlock()
if c.sentinel != nil {
- addr := c.getMasterAddr(ctx, c.sentinel)
- if addr != "" {
+ addr, err := c.getMasterAddr(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
return addr, nil
}
- _ = c.closeSentinel()
}
for i, sentinelAddr := range c.sentinelAddrs {
@@ -511,9 +551,12 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
c.opt.MasterName, err)
- _ = sentinel.Close()
continue
}
@@ -528,14 +571,21 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
-func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
c.mu.RLock()
sentinel := c.sentinel
c.mu.RUnlock()
if sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, sentinel)
- if len(addrs) > 0 {
+ addrs, err := c.getReplicaAddrs(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
return addrs, nil
}
}
@@ -544,11 +594,21 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
defer c.mu.Unlock()
if c.sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, c.sentinel)
- if len(addrs) > 0 {
+ addrs, err := c.getReplicaAddrs(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
return addrs, nil
+ } else {
+ // No error and no replicas.
+ _ = c.closeSentinel()
}
- _ = c.closeSentinel()
}
var sentinelReachable bool
@@ -556,15 +616,18 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
for i, sentinelAddr := range c.sentinelAddrs {
sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
- slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
- c.opt.MasterName, err)
_ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s",
+ c.opt.MasterName, err)
continue
}
sentinelReachable = true
- addrs := parseSlaveAddrs(slaves, useDisconnected)
+ addrs := parseReplicaAddrs(replicas, useDisconnected)
if len(addrs) == 0 {
continue
}
@@ -581,60 +644,42 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool)
return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
-func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) (string, error) {
addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
- c.opt.MasterName, err)
- return ""
+ return "", err
}
- return net.JoinHostPort(addr[0], addr[1])
+ return net.JoinHostPort(addr[0], addr[1]), nil
}
-func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
- addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+func (c *sentinelFailover) getReplicaAddrs(ctx context.Context, sentinel *SentinelClient) ([]string, error) {
+ addrs, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
c.opt.MasterName, err)
- return []string{}
+ return nil, err
}
- return parseSlaveAddrs(addrs, false)
+ return parseReplicaAddrs(addrs, false), nil
}
-func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
+func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []string {
nodes := make([]string, 0, len(addrs))
for _, node := range addrs {
- ip := ""
- port := ""
- flags := []string{}
- lastkey := ""
isDown := false
-
- for _, key := range node.([]interface{}) {
- switch lastkey {
- case "ip":
- ip = key.(string)
- case "port":
- port = key.(string)
- case "flags":
- flags = strings.Split(key.(string), ",")
- }
- lastkey = key.(string)
- }
-
- for _, flag := range flags {
- switch flag {
- case "s_down", "o_down":
- isDown = true
- case "disconnected":
- if !keepDisconnected {
+ if flags, ok := node["flags"]; ok {
+ for _, flag := range strings.Split(flags, ",") {
+ switch flag {
+ case "s_down", "o_down":
isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
}
}
}
-
- if !isDown {
- nodes = append(nodes, net.JoinHostPort(ip, port))
+ if !isDown && node["ip"] != "" && node["port"] != "" {
+ nodes = append(nodes, net.JoinHostPort(node["ip"], node["port"]))
}
}
@@ -672,7 +717,7 @@ func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelCl
c.sentinel = sentinel
c.discoverSentinels(ctx)
- c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+replica-reconf-done")
go c.listen(c.pubsub)
}
@@ -683,16 +728,13 @@ func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
return
}
for _, sentinel := range sentinels {
- vals := sentinel.([]interface{})
- var ip, port string
- for i := 0; i < len(vals); i += 2 {
- key := vals[i].(string)
- switch key {
- case "ip":
- ip = vals[i+1].(string)
- case "port":
- port = vals[i+1].(string)
- }
+ ip, ok := sentinel["ip"]
+ if !ok {
+ continue
+ }
+ port, ok := sentinel["port"]
+ if !ok {
+ continue
}
if ip != "" && port != "" {
sentinelAddr := net.JoinHostPort(ip, port)
@@ -742,7 +784,7 @@ func contains(slice []string, str string) bool {
//------------------------------------------------------------------------------
// NewFailoverClusterClient returns a client that supports routing read-only commands
-// to a slave node.
+// to a replica node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
copy(sentinelAddrs, failoverOpt.SentinelAddrs)
@@ -763,14 +805,14 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
Addr: masterAddr,
}}
- slaveAddrs, err := failover.slaveAddrs(ctx, false)
+ replicaAddrs, err := failover.replicaAddrs(ctx, false)
if err != nil {
return nil, err
}
- for _, slaveAddr := range slaveAddrs {
+ for _, replicaAddr := range replicaAddrs {
nodes = append(nodes, ClusterNode{
- Addr: slaveAddr,
+ Addr: replicaAddr,
})
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel_test.go
index 753e0fc..8bc6c57 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/sentinel_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sentinel_test.go
@@ -1,14 +1,39 @@
package redis_test
import (
+ "context"
"net"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
+var _ = Describe("Sentinel PROTO 2", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ MaxRetries: -1,
+ Protocol: 2,
+ })
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.Close()
+ })
+
+ It("should sentinel client PROTO 2", func() {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ })
+})
+
var _ = Describe("Sentinel", func() {
var client *redis.Client
var master *redis.Client
@@ -17,6 +42,7 @@ var _ = Describe("Sentinel", func() {
BeforeEach(func() {
client = redis.NewFailoverClient(&redis.FailoverOptions{
+ ClientName: "sentinel_hi",
MasterName: sentinelName,
SentinelAddrs: sentinelAddrs,
MaxRetries: -1,
@@ -125,6 +151,47 @@ var _ = Describe("Sentinel", func() {
err := client.Ping(ctx).Err()
Expect(err).NotTo(HaveOccurred())
})
+
+ It("should sentinel client setname", func() {
+ Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
+ val, err := client.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=sentinel_hi"))
+ })
+
+ It("should sentinel client PROTO 3", func() {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ })
+})
+
+var _ = Describe("NewFailoverClusterClient PROTO 2", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: sentinelAddrs,
+ Protocol: 2,
+
+ RouteRandomly: true,
+ })
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ _ = client.Close()
+ })
+
+ It("should sentinel cluster PROTO 2", func() {
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainElements("proto", int64(2)))
+ return nil
+ })
+ })
})
var _ = Describe("NewFailoverClusterClient", func() {
@@ -134,6 +201,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
BeforeEach(func() {
client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
+ ClientName: "sentinel_cluster_hi",
MasterName: sentinelName,
SentinelAddrs: sentinelAddrs,
@@ -173,6 +241,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
})
It("should facilitate failover", func() {
+ Skip("Flaky Test")
// Set value.
err := client.Set(ctx, "foo", "master", 0).Err()
Expect(err).NotTo(HaveOccurred())
@@ -185,13 +254,14 @@ var _ = Describe("NewFailoverClusterClient", func() {
}
// Create subscription.
- ch := client.Subscribe(ctx, "foo").Channel()
+ sub := client.Subscribe(ctx, "foo")
+ ch := sub.Channel()
// Kill master.
err = master.Shutdown(ctx).Err()
Expect(err).NotTo(HaveOccurred())
Eventually(func() error {
- return sentinelMaster.Ping(ctx).Err()
+ return master.Ping(ctx).Err()
}, "15s", "100ms").Should(HaveOccurred())
// Check that client picked up new master.
@@ -207,10 +277,36 @@ var _ = Describe("NewFailoverClusterClient", func() {
}, "15s", "100ms").Should(Receive(&msg))
Expect(msg.Channel).To(Equal("foo"))
Expect(msg.Payload).To(Equal("hello"))
+ Expect(sub.Close()).NotTo(HaveOccurred())
_, err = startRedis(masterPort)
Expect(err).NotTo(HaveOccurred())
})
+
+ It("should sentinel cluster client setname", func() {
+ Skip("Flaky Test")
+ err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ return c.Ping(ctx).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := c.ClientList(ctx).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(ContainSubstring("name=sentinel_cluster_hi"))
+ return nil
+ })
+ })
+
+ It("should sentinel cluster PROTO 3", func() {
+ Skip("Flaky Test")
+ _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
+ val, err := client.Do(ctx, "HELLO").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).Should(HaveKeyWithValue("proto", int64(3)))
+ return nil
+ })
+ })
})
var _ = Describe("SentinelAclAuth", func() {
@@ -221,14 +317,14 @@ var _ = Describe("SentinelAclAuth", func() {
var client *redis.Client
var sentinel *redis.SentinelClient
- var sentinels = func() []*redisProcess {
+ sentinels := func() []*redisProcess {
return []*redisProcess{sentinel1, sentinel2, sentinel3}
}
BeforeEach(func() {
authCmd := redis.NewStatusCmd(ctx, "ACL", "SETUSER", aclSentinelUsername, "ON",
">"+aclSentinelPassword, "-@all", "+auth", "+client|getname", "+client|id", "+client|setname",
- "+command", "+hello", "+ping", "+role", "+sentinel|get-master-addr-by-name", "+sentinel|master",
+ "+command", "+hello", "+ping", "+client|setinfo", "+role", "+sentinel|get-master-addr-by-name", "+sentinel|master",
"+sentinel|myid", "+sentinel|replicas", "+sentinel|sentinels")
for _, process := range sentinels() {
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go
new file mode 100644
index 0000000..cef8ad6
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/set_commands.go
@@ -0,0 +1,217 @@
+package redis
+
+import "context"
+
+type SetCmdable interface {
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "sintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go
new file mode 100644
index 0000000..6701402
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/sortedset_commands.go
@@ -0,0 +1,772 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type SortedSetCmdable interface {
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd
+ ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZMPop is the blocking variant of ZMPOP.
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP.
+// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses.
+// A timeout of zero can be used to block indefinitely.
+// example: client.BZMPop(ctx, 0,"max", 1, "set")
+func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "bzmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
+ }
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ Members: members,
+ })
+}
+
+// ZAddLT Redis `ZADD key LT score member [score member ...]` command.
+func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ LT: true,
+ Members: members,
+ })
+}
+
+// ZAddGT Redis `ZADD key GT score member [score member ...]` command.
+func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ GT: true,
+ Members: members,
+ })
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ NX: true,
+ Members: members,
+ })
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ XX: true,
+ Members: members,
+ })
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "zintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZMPop Pops one or more elements with the highest or lowest score from the first non-empty sorted set key from the list of provided key names.
+// direction: "max" (highest score) or "min" (lowest score), count: > 0
+// example: client.ZMPop(ctx, "max", 5, "set1", "set2")
+func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "zmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRangeArgs is all the options of the ZRange command.
+// In version> 6.2.0, you can replace the(cmd):
+//
+// ZREVRANGE,
+// ZRANGEBYSCORE,
+// ZREVRANGEBYSCORE,
+// ZRANGEBYLEX,
+// ZREVRANGEBYLEX.
+//
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+ Key string
+
+ // When the ByScore option is provided, the open interval(exclusive) can be set.
+ // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+ // It is similar to the deprecated(6.2.0+) ZRangeByScore command.
+ // For example:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "(3",
+ // Stop: 8,
+ // ByScore: true,
+ // }
+ // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+ //
+ // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command.
+ // You can set the <Start> and <Stop> options as follows:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "[abc",
+ // Stop: "(def",
+ // ByLex: true,
+ // }
+ // cmd: "ZRange example-key [abc (def ByLex"
+ //
+ // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+ // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
+ }
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
+ }
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRankWithScore according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRevRangeWithScores according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMemberWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Z represents sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go
new file mode 100644
index 0000000..0a98692
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/stream_commands.go
@@ -0,0 +1,438 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StreamCmdable interface {
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+}
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string("key1", "value1", "key2", "value2")
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used.
+type XAddArgs struct {
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 6+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 10+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(4)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Idle time.Duration
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default).
+// example:
+//
+// XTRIM key MAXLEN/MINID threshold LIMIT limit.
+// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
+// The redis-server version is lower than 6.2, please set limit to 0.
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XTrimMaxLen No `~` rules are used, `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go
new file mode 100644
index 0000000..eff5880
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/string_commands.go
@@ -0,0 +1,303 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StringCmdable interface {
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ LCS(ctx context.Context, q *LCSQuery) *LCSCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetDel redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd {
+ cmd := NewLCSCmd(ctx, q)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSet(struct), For struct types, see HSet description.
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSetNX(struct), For struct types, see HSet description.
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEx`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+ // Mode can be `NX` or `XX` or empty.
+ Mode string
+
+ // Zero `TTL` or `Expiration` means that the key has no expiration time.
+ TTL time.Duration
+ ExpireAt time.Time
+
+ // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+ Get bool
+
+ // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+ // otherwise you will receive an error: (error) ERR syntax error.
+ KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetEx Redis `SETEx key expiration value` command.
+func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go
new file mode 100644
index 0000000..6f1b2fa
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands.go
@@ -0,0 +1,922 @@
+package redis
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type TimeseriesCmdable interface {
+ TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd
+ TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd
+ TSCreate(ctx context.Context, key string) *StatusCmd
+ TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd
+ TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd
+ TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd
+ TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd
+ TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd
+ TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd
+ TSGet(ctx context.Context, key string) *TSTimestampValueCmd
+ TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd
+ TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd
+ TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd
+ TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd
+ TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd
+ TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd
+ TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd
+ TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd
+ TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd
+ TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd
+ TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd
+}
+
+type TSOptions struct {
+ Retention int
+ ChunkSize int
+ Encoding string
+ DuplicatePolicy string
+ Labels map[string]string
+}
+type TSIncrDecrOptions struct {
+ Timestamp int64
+ Retention int
+ ChunkSize int
+ Uncompressed bool
+ Labels map[string]string
+}
+
+type TSAlterOptions struct {
+ Retention int
+ ChunkSize int
+ DuplicatePolicy string
+ Labels map[string]string
+}
+
+type TSCreateRuleOptions struct {
+ alignTimestamp int64
+}
+
+type TSGetOptions struct {
+ Latest bool
+}
+
+type TSInfoOptions struct {
+ Debug bool
+}
+type Aggregator int
+
+const (
+ Invalid = Aggregator(iota)
+ Avg
+ Sum
+ Min
+ Max
+ Range
+ Count
+ First
+ Last
+ StdP
+ StdS
+ VarP
+ VarS
+ Twa
+)
+
+func (a Aggregator) String() string {
+ switch a {
+ case Invalid:
+ return ""
+ case Avg:
+ return "AVG"
+ case Sum:
+ return "SUM"
+ case Min:
+ return "MIN"
+ case Max:
+ return "MAX"
+ case Range:
+ return "RANGE"
+ case Count:
+ return "COUNT"
+ case First:
+ return "FIRST"
+ case Last:
+ return "LAST"
+ case StdP:
+ return "STD.P"
+ case StdS:
+ return "STD.S"
+ case VarP:
+ return "VAR.P"
+ case VarS:
+ return "VAR.S"
+ case Twa:
+ return "TWA"
+ default:
+ return ""
+ }
+}
+
+type TSRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSMRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMGetOptions struct {
+ Latest bool
+ WithLabels bool
+ SelectedLabels []interface{}
+}
+
+// TSAdd - Adds one or more observations to a t-digest sketch.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAddWithArgs - Adds one or more observations to a t-digest sketch.
+// This function also allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreate - Creates a new time-series key.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateWithArgs - Creates a new time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAlter - Alters an existing time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize and DuplicatePolicy.
+// For more information - https://redis.io/commands/ts.alter/
+func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd {
+ args := []interface{}{"TS.ALTER", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRule - Creates a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRuleWithArgs - Creates a compaction rule from sourceKey to destKey with additional option.
+// This function allows for specifying additional option such as:
+// alignTimestamp.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ if options != nil {
+ if options.alignTimestamp != 0 {
+ args = append(args, options.alignTimestamp)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrBy - Increments the value of a time-series key by the specified timestamp.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.INCRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrByWithArgs - Increments the value of a time-series key by the specified timestamp with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.INCRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrBy - Decrements the value of a time-series key by the specified timestamp.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.DECRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrByWithArgs - Decrements the value of a time-series key by the specified timestamp with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.DECRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDel - Deletes a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.del/
+func (c cmdable) TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd {
+ args := []interface{}{"TS.DEL", Key, fromTimestamp, toTimestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDeleteRule - Deletes a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.deleterule/
+func (c cmdable) TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd {
+ args := []interface{}{"TS.DELETERULE", sourceKey, destKey}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGetWithArgs - Gets the last sample of a time-series key with additional option.
+// This function allows for specifying additional option such as:
+// Latest.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ }
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGet - Gets the last sample of a time-series key.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGet(ctx context.Context, key string) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValue struct {
+ Timestamp int64
+ Value float64
+}
+type TSTimestampValueCmd struct {
+ baseCmd
+ val TSTimestampValue
+}
+
+func newTSTimestampValueCmd(ctx context.Context, args ...interface{}) *TSTimestampValueCmd {
+ return &TSTimestampValueCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueCmd) SetVal(val TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueCmd) Result() (TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueCmd) Val() TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = TSTimestampValue{}
+ for i := 0; i < n; i++ {
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Timestamp = timestamp
+ cmd.val.Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSInfo - Returns information about a time-series key.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSInfoWithArgs - Returns information about a time-series key with additional option.
+// This function allows for specifying additional option such as:
+// Debug.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ if options != nil {
+ if options.Debug {
+ args = append(args, "DEBUG")
+ }
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMAdd - Adds multiple samples to multiple time-series keys.
+// It accepts a slice of 'ktv' slices, each containing exactly three elements: key, timestamp, and value.
+// This struct must be provided for this command to work.
+// For more information - https://redis.io/commands/ts.madd/
+func (c cmdable) TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd {
+ args := []interface{}{"TS.MADD"}
+ for _, ktv := range ktvSlices {
+ args = append(args, ktv...)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSQueryIndex - Returns all the keys matching the filter expression.
+// For more information - https://redis.io/commands/ts.queryindex/
+func (c cmdable) TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd {
+ args := []interface{}{"TS.QUERYINDEX"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRange - Returns a range of samples from a time-series key in reverse order.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRangeWithArgs - Returns a range of samples from a time-series key in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRange - Returns a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRangeWithArgs - Returns a range of samples from a time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValueSliceCmd struct {
+ baseCmd
+ val []TSTimestampValue
+}
+
+func newTSTimestampValueSliceCmd(ctx context.Context, args ...interface{}) *TSTimestampValueSliceCmd {
+ return &TSTimestampValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueSliceCmd) SetVal(val []TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueSliceCmd) Result() ([]TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueSliceCmd) Val() []TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]TSTimestampValue, n)
+ for i := 0; i < n; i++ {
+ _, _ = rd.ReadArrayLen()
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Timestamp = timestamp
+ cmd.val[i].Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSMRange - Returns a range of samples from multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRangeWithArgs - Returns a range of samples from multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRange - Returns a range of samples from multiple time-series keys in reverse order.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRangeWithArgs - Returns a range of samples from multiple time-series keys in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGet - Returns the last sample of multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET", "FILTER"}
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGetWithArgs - Returns the last sample of multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, WithLabels and SelectedLabels.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET"}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go
new file mode 100644
index 0000000..563f24e
--- /dev/null
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/timeseries_commands_test.go
@@ -0,0 +1,940 @@
+package redis_test
+
+import (
+ "context"
+ "strings"
+
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() {
+ ctx := context.TODO()
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(&redis.Options{Addr: rediStackAddr})
+ Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should TSCreate and TSCreateWithArgs", Label("timeseries", "tscreate", "tscreateWithArgs"), func() {
+ result, err := client.TSCreate(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ // Test TSCreateWithArgs
+ opt := &redis.TSOptions{Retention: 5}
+ result, err = client.TSCreateWithArgs(ctx, "2", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs"}}
+ result, err = client.TSCreateWithArgs(ctx, "3", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Time": "Series"}, Retention: 20}
+ result, err = client.TSCreateWithArgs(ctx, "4", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err := client.TSInfo(ctx, "4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series"))
+ // Test chunk size
+ opt = &redis.TSOptions{ChunkSize: 128}
+ result, err = client.TSCreateWithArgs(ctx, "ts-cs-1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err = client.TSInfo(ctx, "ts-cs-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+ // Test duplicate policy
+ duplicate_policies := []string{"BLOCK", "LAST", "FIRST", "MIN", "MAX"}
+ for _, dup := range duplicate_policies {
+ keyName := "ts-dup-" + dup
+ opt = &redis.TSOptions{DuplicatePolicy: dup}
+ result, err = client.TSCreateWithArgs(ctx, keyName, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err = client.TSInfo(ctx, keyName).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(strings.ToUpper(resultInfo["duplicatePolicy"].(string))).To(BeEquivalentTo(dup))
+
+ }
+ })
+ It("should TSAdd and TSAddWithArgs", Label("timeseries", "tsadd", "tsaddWithArgs"), func() {
+ result, err := client.TSAdd(ctx, "1", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ // Test TSAddWithArgs
+ opt := &redis.TSOptions{Retention: 10}
+ result, err = client.TSAddWithArgs(ctx, "2", 2, 3, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(2))
+ opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs"}}
+ result, err = client.TSAddWithArgs(ctx, "3", 3, 2, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(3))
+ opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs", "Time": "Series"}, Retention: 10}
+ result, err = client.TSAddWithArgs(ctx, "4", 4, 2, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(4))
+ resultInfo, err := client.TSInfo(ctx, "4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series"))
+ // Test chunk size
+ opt = &redis.TSOptions{ChunkSize: 128}
+ result, err = client.TSAddWithArgs(ctx, "ts-cs-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultInfo, err = client.TSInfo(ctx, "ts-cs-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+ // Test duplicate policy
+ // LAST
+ opt = &redis.TSOptions{DuplicatePolicy: "LAST"}
+ result, err = client.TSAddWithArgs(ctx, "tsal-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsal-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err := client.TSGet(ctx, "tsal-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(10))
+ // FIRST
+ opt = &redis.TSOptions{DuplicatePolicy: "FIRST"}
+ result, err = client.TSAddWithArgs(ctx, "tsaf-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsaf-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err = client.TSGet(ctx, "tsaf-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(5))
+ // MAX
+ opt = &redis.TSOptions{DuplicatePolicy: "MAX"}
+ result, err = client.TSAddWithArgs(ctx, "tsam-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsam-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err = client.TSGet(ctx, "tsam-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(10))
+ // MIN
+ opt = &redis.TSOptions{DuplicatePolicy: "MIN"}
+ result, err = client.TSAddWithArgs(ctx, "tsami-1", 1, 5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ result, err = client.TSAddWithArgs(ctx, "tsami-1", 1, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo(1))
+ resultGet, err = client.TSGet(ctx, "tsami-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(5))
+ })
+
+ It("should TSAlter", Label("timeseries", "tsalter"), func() {
+ result, err := client.TSCreate(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ resultInfo, err := client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(0))
+
+ opt := &redis.TSAlterOptions{Retention: 10}
+ resultAlter, err := client.TSAlter(ctx, "1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAlter).To(BeEquivalentTo("OK"))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+
+ opt = &redis.TSAlterOptions{Labels: map[string]string{"Time": "Series"}}
+ resultAlter, err = client.TSAlter(ctx, "1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAlter).To(BeEquivalentTo("OK"))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series"))
+ Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10))
+ Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo(redis.Nil))
+ opt = &redis.TSAlterOptions{DuplicatePolicy: "min"}
+ resultAlter, err = client.TSAlter(ctx, "1", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAlter).To(BeEquivalentTo("OK"))
+
+ resultInfo, err = client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo("min"))
+ })
+
+ It("should TSCreateRule and TSDeleteRule", Label("timeseries", "tscreaterule", "tsdeleterule"), func() {
+ result, err := client.TSCreate(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ result, err = client.TSCreate(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ result, err = client.TSCreateRule(ctx, "1", "2", redis.Avg, 100).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo("OK"))
+ for i := 0; i < 50; i++ {
+ resultAdd, err := client.TSAdd(ctx, "1", 100+i*2, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo(100 + i*2))
+ resultAdd, err = client.TSAdd(ctx, "1", 100+i*2+1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo(100 + i*2 + 1))
+
+ }
+ resultAdd, err := client.TSAdd(ctx, "1", 100*2, 1.5).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultAdd).To(BeEquivalentTo(100 * 2))
+ resultGet, err := client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet.Value).To(BeEquivalentTo(1.5))
+ Expect(resultGet.Timestamp).To(BeEquivalentTo(100))
+
+ resultDeleteRule, err := client.TSDeleteRule(ctx, "1", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultDeleteRule).To(BeEquivalentTo("OK"))
+ resultInfo, err := client.TSInfo(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["rules"]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ })
+
+ It("should TSIncrBy, TSIncrByWithArgs, TSDecrBy and TSDecrByWithArgs", Label("timeseries", "tsincrby", "tsdecrby", "tsincrbyWithArgs", "tsdecrbyWithArgs"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSIncrBy(ctx, "1", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ result, err := client.TSGet(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Value).To(BeEquivalentTo(100))
+
+ for i := 0; i < 100; i++ {
+ _, err := client.TSDecrBy(ctx, "1", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ result, err = client.TSGet(ctx, "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Value).To(BeEquivalentTo(0))
+
+ opt := &redis.TSIncrDecrOptions{Timestamp: 5}
+ _, err = client.TSIncrByWithArgs(ctx, "2", 1.5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(5))
+ Expect(result.Value).To(BeEquivalentTo(1.5))
+
+ opt = &redis.TSIncrDecrOptions{Timestamp: 7}
+ _, err = client.TSIncrByWithArgs(ctx, "2", 2.25, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(7))
+ Expect(result.Value).To(BeEquivalentTo(3.75))
+
+ opt = &redis.TSIncrDecrOptions{Timestamp: 15}
+ _, err = client.TSDecrByWithArgs(ctx, "2", 1.5, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err = client.TSGet(ctx, "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(15))
+ Expect(result.Value).To(BeEquivalentTo(2.25))
+
+ // Test chunk size INCRBY
+ opt = &redis.TSIncrDecrOptions{ChunkSize: 128}
+ _, err = client.TSIncrByWithArgs(ctx, "3", 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err := client.TSInfo(ctx, "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+
+ // Test chunk size DECRBY
+ opt = &redis.TSIncrDecrOptions{ChunkSize: 128}
+ _, err = client.TSDecrByWithArgs(ctx, "4", 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ resultInfo, err = client.TSInfo(ctx, "4").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128))
+ })
+
+ It("should TSGet", Label("timeseries", "tsget"), func() {
+ opt := &redis.TSOptions{DuplicatePolicy: "max"}
+ resultGet, err := client.TSAddWithArgs(ctx, "foo", 2265985, 151, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo(2265985))
+ result, err := client.TSGet(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(2265985))
+ Expect(result.Value).To(BeEquivalentTo(151))
+ })
+
+ It("should TSGet Latest", Label("timeseries", "tsgetlatest", "NonRedisEnterprise"), func() {
+ resultGet, err := client.TSCreate(ctx, "tsgl-1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+ resultGet, err = client.TSCreate(ctx, "tsgl-2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+
+ resultGet, err = client.TSCreateRule(ctx, "tsgl-1", "tsgl-2", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+ _, err = client.TSAdd(ctx, "tsgl-1", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "tsgl-1", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "tsgl-1", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "tsgl-1", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ result, errGet := client.TSGet(ctx, "tsgl-2").Result()
+ Expect(errGet).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(0))
+ Expect(result.Value).To(BeEquivalentTo(4))
+ result, errGet = client.TSGetWithArgs(ctx, "tsgl-2", &redis.TSGetOptions{Latest: true}).Result()
+ Expect(errGet).NotTo(HaveOccurred())
+ Expect(result.Timestamp).To(BeEquivalentTo(10))
+ Expect(result.Value).To(BeEquivalentTo(8))
+ })
+
+ It("should TSInfo", Label("timeseries", "tsinfo"), func() {
+ resultGet, err := client.TSAdd(ctx, "foo", 2265985, 151).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo(2265985))
+ result, err := client.TSInfo(ctx, "foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["firstTimestamp"]).To(BeEquivalentTo(2265985))
+ })
+
+ It("should TSMAdd", Label("timeseries", "tsmadd"), func() {
+ resultGet, err := client.TSCreate(ctx, "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultGet).To(BeEquivalentTo("OK"))
+ ktvSlices := make([][]interface{}, 3)
+ for i := 0; i < 3; i++ {
+ ktvSlices[i] = make([]interface{}, 3)
+ ktvSlices[i][0] = "a"
+ for j := 1; j < 3; j++ {
+ ktvSlices[i][j] = (i + j) * j
+ }
+ }
+ result, err := client.TSMAdd(ctx, ktvSlices).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]int64{1, 2, 3}))
+ })
+
+ It("should TSMGet and TSMGetWithArgs", Label("timeseries", "tsmget", "tsmgetWithArgs", "NonRedisEnterprise"), func() {
+ opt := &redis.TSOptions{Labels: map[string]string{"Test": "This"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ _, err = client.TSAdd(ctx, "a", "*", 15).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "b", "*", 25).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ result, err := client.TSMGet(ctx, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][1].([]interface{})[1]).To(BeEquivalentTo(15))
+ Expect(result["b"][1].([]interface{})[1]).To(BeEquivalentTo(25))
+ mgetOpt := &redis.TSMGetOptions{WithLabels: true}
+ result, err = client.TSMGetWithArgs(ctx, []string{"Test=This"}, mgetOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "Taste": "That"}))
+
+ resultCreate, err = client.TSCreate(ctx, "c").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultCreateRule, err := client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+ _, err = client.TSAdd(ctx, "c", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ result, err = client.TSMGet(ctx, []string{"is_compaction=true"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(0), 4.0}))
+ mgetOpt = &redis.TSMGetOptions{Latest: true}
+ result, err = client.TSMGetWithArgs(ctx, []string{"is_compaction=true"}, mgetOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(10), 8.0}))
+ })
+
+ It("should TSQueryIndex", Label("timeseries", "tsqueryindex"), func() {
+ opt := &redis.TSOptions{Labels: map[string]string{"Test": "This"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ result, err := client.TSQueryIndex(ctx, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ result, err = client.TSQueryIndex(ctx, []string{"Taste=That"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(1))
+ })
+
+ It("should TSDel and TSRange", Label("timeseries", "tsdel", "tsrange"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ resultDelete, err := client.TSDel(ctx, "a", 0, 21).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultDelete).To(BeEquivalentTo(22))
+
+ resultRange, err := client.TSRange(ctx, "a", 0, 21).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange).To(BeEquivalentTo([]redis.TSTimestampValue{}))
+
+ resultRange, err = client.TSRange(ctx, "a", 22, 22).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 22, Value: 1}))
+ })
+
+ It("should TSRange, TSRangeWithArgs", Label("timeseries", "tsrange", "tsrangeWithArgs", "NonRedisEnterprise"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ }
+ result, err := client.TSRange(ctx, "a", 0, 200).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(100))
+ for i := 0; i < 100; i++ {
+ client.TSAdd(ctx, "a", i+200, float64(i%7))
+ }
+ result, err = client.TSRange(ctx, "a", 0, 500).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(200))
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ opt := &redis.TSRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ opt = &redis.TSRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "+"}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 10}, {Timestamp: 10, Value: 1}}))
+ opt = &redis.TSRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "5"}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 5}, {Timestamp: 5, Value: 6}}))
+ opt = &redis.TSRangeOptions{Aggregator: redis.Twa, BucketDuration: 10}
+ result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 2.55}, {Timestamp: 10, Value: 3}}))
+ // Test Range Latest
+ resultCreate, err := client.TSCreate(ctx, "t1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultCreate, err = client.TSCreate(ctx, "t2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultRule, err := client.TSCreateRule(ctx, "t1", "t2", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRule).To(BeEquivalentTo("OK"))
+ _, errAdd := client.TSAdd(ctx, "t1", 1, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 2, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 11, 7).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 13, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ resultRange, err := client.TSRange(ctx, "t1", 0, 20).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 1, Value: 1}))
+
+ opt = &redis.TSRangeOptions{Latest: true}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t2", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4}))
+ // Test Bucket Timestamp
+ resultCreate, err = client.TSCreate(ctx, "t3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ _, errAdd = client.TSAdd(ctx, "t3", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, BucketTimestamp: "+"}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 20, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+ // Test Empty
+ _, errAdd = client.TSAdd(ctx, "t4", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, Empty: true}
+ resultRange, err = client.TSRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4}))
+ Expect(len(resultRange)).To(BeEquivalentTo(7))
+ })
+
+ It("should TSRevRange, TSRevRangeWithArgs", Label("timeseries", "tsrevrange", "tsrevrangeWithArgs", "NonRedisEnterprise"), func() {
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ }
+ result, err := client.TSRange(ctx, "a", 0, 200).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(100))
+ for i := 0; i < 100; i++ {
+ client.TSAdd(ctx, "a", i+200, float64(i%7))
+ }
+ result, err = client.TSRange(ctx, "a", 0, 500).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(200))
+
+ opt := &redis.TSRevRangeOptions{Aggregator: redis.Avg, BucketDuration: 10}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(20))
+
+ opt = &redis.TSRevRangeOptions{Count: 10}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(10))
+
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ opt = &redis.TSRevRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "+"}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 10, Value: 1}, {Timestamp: 0, Value: 10}}))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "1"}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1, Value: 10}, {Timestamp: 0, Value: 1}}))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Twa, BucketDuration: 10}
+ result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 10, Value: 3}, {Timestamp: 0, Value: 2.55}}))
+ // Test Range Latest
+ resultCreate, err := client.TSCreate(ctx, "t1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultCreate, err = client.TSCreate(ctx, "t2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ resultRule, err := client.TSCreateRule(ctx, "t1", "t2", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRule).To(BeEquivalentTo("OK"))
+ _, errAdd := client.TSAdd(ctx, "t1", 1, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 2, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 11, 7).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t1", 13, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ resultRange, err := client.TSRange(ctx, "t2", 0, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4}))
+ opt = &redis.TSRevRangeOptions{Latest: true}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t2", 0, 10, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 8}))
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t2", 0, 9, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4}))
+ // Test Bucket Timestamp
+ resultCreate, err = client.TSCreate(ctx, "t3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ _, errAdd = client.TSAdd(ctx, "t3", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t3", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, BucketTimestamp: "+"}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t3", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 80, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+ // Test Empty
+ _, errAdd = client.TSAdd(ctx, "t4", 15, 1).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 17, 4).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 51, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 73, 5).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+ _, errAdd = client.TSAdd(ctx, "t4", 75, 3).Result()
+ Expect(errAdd).NotTo(HaveOccurred())
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(3))
+
+ opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, Empty: true}
+ resultRange, err = client.TSRevRangeWithArgs(ctx, "t4", 0, 100, opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5}))
+ Expect(len(resultRange)).To(BeEquivalentTo(7))
+ })
+
+ It("should TSMRange and TSMRangeWithArgs", Label("timeseries", "tsmrange", "tsmrangeWithArgs"), func() {
+ createOpt := &redis.TSOptions{Labels: map[string]string{"Test": "This", "team": "ny"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ createOpt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That", "team": "sf"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "b", i, float64(i%11)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ result, err := client.TSMRange(ctx, 0, 200, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(100))
+ // Test Count
+ mrangeOpt := &redis.TSMRangeOptions{Count: 10}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(10))
+ // Test Aggregation and BucketDuration
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i+200, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Avg, BucketDuration: 10}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 500, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(20))
+ // Test WithLabels
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ mrangeOpt = &redis.TSMRangeOptions{WithLabels: true}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "team": "ny"}))
+ // Test SelectedLabels
+ mrangeOpt = &redis.TSMRangeOptions{SelectedLabels: []interface{}{"team"}}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "ny"}))
+ Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "sf"}))
+ // Test FilterBy
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ mrangeOpt = &redis.TSMRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(15), 1.0}, []interface{}{int64(16), 2.0}}))
+ // Test GroupBy
+ mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "Test", Reducer: "sum"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 2.0}, []interface{}{int64(2), 4.0}, []interface{}{int64(3), 6.0}}))
+
+ mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "Test", Reducer: "max"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}}))
+
+ mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "team", Reducer: "min"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(result["team=ny"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}}))
+ Expect(result["team=sf"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}}))
+ // Test Align
+ mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "-"}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 10.0}, []interface{}{int64(10), 1.0}}))
+
+ mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: 5}
+ result, err = client.TSMRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 5.0}, []interface{}{int64(5), 6.0}}))
+ })
+
+ It("should TSMRangeWithArgs Latest", Label("timeseries", "tsmrangeWithArgs", "tsmrangelatest", "NonRedisEnterprise"), func() {
+ resultCreate, err := client.TSCreate(ctx, "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt := &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreate, err = client.TSCreate(ctx, "c").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreateRule, err := client.TSCreateRule(ctx, "a", "b", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+ resultCreateRule, err = client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+
+ _, err = client.TSAdd(ctx, "a", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = client.TSAdd(ctx, "c", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ mrangeOpt := &redis.TSMRangeOptions{Latest: true}
+ result, err := client.TSMRangeWithArgs(ctx, 0, 10, []string{"is_compaction=true"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["b"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 4.0}, []interface{}{int64(10), 8.0}}))
+ Expect(result["d"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 4.0}, []interface{}{int64(10), 8.0}}))
+ })
+ It("should TSMRevRange and TSMRevRangeWithArgs", Label("timeseries", "tsmrevrange", "tsmrevrangeWithArgs"), func() {
+ createOpt := &redis.TSOptions{Labels: map[string]string{"Test": "This", "team": "ny"}}
+ resultCreate, err := client.TSCreateWithArgs(ctx, "a", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ createOpt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That", "team": "sf"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", createOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "b", i, float64(i%11)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ result, err := client.TSMRevRange(ctx, 0, 200, []string{"Test=This"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(100))
+ // Test Count
+ mrangeOpt := &redis.TSMRevRangeOptions{Count: 10}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(10))
+ // Test Aggregation and BucketDuration
+ for i := 0; i < 100; i++ {
+ _, err := client.TSAdd(ctx, "a", i+200, float64(i%7)).Result()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Avg, BucketDuration: 10}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 500, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(20))
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ // Test WithLabels
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{}))
+ mrangeOpt = &redis.TSMRevRangeOptions{WithLabels: true}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "team": "ny"}))
+ // Test SelectedLabels
+ mrangeOpt = &redis.TSMRevRangeOptions{SelectedLabels: []interface{}{"team"}}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "ny"}))
+ Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "sf"}))
+ // Test FilterBy
+ fts := make([]int, 0)
+ for i := 10; i < 20; i++ {
+ fts = append(fts, i)
+ }
+ mrangeOpt = &redis.TSMRevRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(16), 2.0}, []interface{}{int64(15), 1.0}}))
+ // Test GroupBy
+ mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "Test", Reducer: "sum"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 6.0}, []interface{}{int64(2), 4.0}, []interface{}{int64(1), 2.0}, []interface{}{int64(0), 0.0}}))
+
+ mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "Test", Reducer: "max"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}}))
+
+ mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "team", Reducer: "min"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(result)).To(BeEquivalentTo(2))
+ Expect(result["team=ny"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}}))
+ Expect(result["team=sf"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}}))
+ // Test Align
+ mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "-"}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 1.0}, []interface{}{int64(0), 10.0}}))
+
+ mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: 1}
+ result, err = client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(1), 10.0}, []interface{}{int64(0), 1.0}}))
+ })
+
+ It("should TSMRevRangeWithArgs Latest", Label("timeseries", "tsmrevrangeWithArgs", "tsmrevrangelatest", "NonRedisEnterprise"), func() {
+ resultCreate, err := client.TSCreate(ctx, "a").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt := &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreate, err = client.TSCreate(ctx, "c").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+ opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}}
+ resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreate).To(BeEquivalentTo("OK"))
+
+ resultCreateRule, err := client.TSCreateRule(ctx, "a", "b", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+ resultCreateRule, err = client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(resultCreateRule).To(BeEquivalentTo("OK"))
+
+ _, err = client.TSAdd(ctx, "a", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "a", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = client.TSAdd(ctx, "c", 1, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 11, 7).Result()
+ Expect(err).NotTo(HaveOccurred())
+ _, err = client.TSAdd(ctx, "c", 13, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ mrangeOpt := &redis.TSMRevRangeOptions{Latest: true}
+ result, err := client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"is_compaction=true"}, mrangeOpt).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(result["b"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 8.0}, []interface{}{int64(0), 4.0}}))
+ Expect(result["d"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 8.0}, []interface{}{int64(0), 4.0}}))
+ })
+})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx.go
index 8c9d872..039eaf3 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx.go
@@ -3,8 +3,8 @@ package redis
import (
"context"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// TxFailedErr transaction redis failed.
@@ -19,18 +19,16 @@ type Tx struct {
baseClient
cmdable
statefulCmdable
- hooks
- ctx context.Context
+ hooksMixin
}
-func (c *Client) newTx(ctx context.Context) *Tx {
+func (c *Client) newTx() *Tx {
tx := Tx{
baseClient: baseClient{
opt: c.opt,
connPool: pool.NewStickyConnPool(c.connPool),
},
- hooks: c.hooks.clone(),
- ctx: ctx,
+ hooksMixin: c.hooksMixin.clone(),
}
tx.init()
return &tx
@@ -39,25 +37,19 @@ func (c *Client) newTx(ctx context.Context) *Tx {
func (c *Tx) init() {
c.cmdable = c.Process
c.statefulCmdable = c.Process
-}
-
-func (c *Tx) Context() context.Context {
- return c.ctx
-}
-func (c *Tx) WithContext(ctx context.Context) *Tx {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.init()
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
}
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
// Watch prepares a transaction and marks the keys to be watched
@@ -65,7 +57,7 @@ func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- tx := c.newTx(ctx)
+ tx := c.newTx()
defer tx.Close(ctx)
if len(keys) > 0 {
if err := tx.Watch(ctx, keys...).Err(); err != nil {
@@ -109,9 +101,8 @@ func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ return c.processPipelineHook(ctx, cmds)
},
}
pipe.init()
@@ -139,11 +130,22 @@ func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
func (c *Tx) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdsCopy := make([]Cmder, len(cmds)+2)
+ cmdsCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdsCopy[1:], cmds)
+ cmdsCopy[len(cmdsCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdsCopy
+}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx_test.go
index 7deb2df..1146d46 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/tx_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/tx_test.go
@@ -5,10 +5,10 @@ import (
"strconv"
"sync"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("Tx", func() {
@@ -64,7 +64,7 @@ var _ = Describe("Tx", func() {
Expect(n).To(Equal(int64(100)))
})
- It("should discard", func() {
+ It("should discard", Label("NonRedisEnterprise"), func() {
err := client.Watch(ctx, func(tx *redis.Tx) error {
cmds, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
pipe.Set(ctx, "key1", "hello1", 0)
@@ -143,9 +143,6 @@ var _ = Describe("Tx", func() {
}
err = do()
- Expect(err).To(MatchError("bad connection"))
-
- err = do()
Expect(err).NotTo(HaveOccurred())
})
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal.go
index c89b3e5..275bef3 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal.go
@@ -14,6 +14,9 @@ type UniversalOptions struct {
// of cluster/sentinel nodes.
Addrs []string
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
// Database to be selected after connecting to the server.
// Only single-node and failover clients.
DB int
@@ -23,6 +26,7 @@ type UniversalOptions struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(ctx context.Context, cn *Conn) error
+ Protocol int
Username string
Password string
SentinelUsername string
@@ -32,19 +36,21 @@ type UniversalOptions struct {
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
TLSConfig *tls.Config
@@ -59,6 +65,9 @@ type UniversalOptions struct {
// Only failover clients.
MasterName string
+
+ DisableIndentity bool
+ IdentitySuffix string
}
// Cluster returns cluster options created from the universal options.
@@ -68,10 +77,12 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
}
return &ClusterOptions{
- Addrs: o.Addrs,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
+ Addrs: o.Addrs,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+ Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
@@ -84,18 +95,25 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
}
}
@@ -108,11 +126,13 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
return &FailoverOptions{
SentinelAddrs: o.Addrs,
MasterName: o.MasterName,
+ ClientName: o.ClientName,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
DB: o.DB,
+ Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
SentinelUsername: o.SentinelUsername,
@@ -122,19 +142,24 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
}
}
@@ -146,11 +171,13 @@ func (o *UniversalOptions) Simple() *Options {
}
return &Options{
- Addr: addr,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
+ Addr: addr,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
DB: o.DB,
+ Protocol: o.Protocol,
Username: o.Username,
Password: o.Password,
@@ -158,19 +185,24 @@ func (o *UniversalOptions) Simple() *Options {
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
}
}
@@ -182,13 +214,13 @@ func (o *UniversalOptions) Simple() *Options {
// clients in different environments.
type UniversalClient interface {
Cmdable
- Context() context.Context
AddHook(Hook)
Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
Do(ctx context.Context, args ...interface{}) *Cmd
Process(ctx context.Context, cmd Cmder) error
Subscribe(ctx context.Context, channels ...string) *PubSub
PSubscribe(ctx context.Context, channels ...string) *PubSub
+ SSubscribe(ctx context.Context, channels ...string) *PubSub
Close() error
PoolStats() *PoolStats
}
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal_test.go
index 7491a1d..747c68a 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/universal_test.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/universal_test.go
@@ -1,10 +1,10 @@
package redis_test
import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
+ . "github.com/bsm/ginkgo/v2"
+ . "github.com/bsm/gomega"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
var _ = Describe("UniversalClient", func() {
@@ -17,6 +17,7 @@ var _ = Describe("UniversalClient", func() {
})
It("should connect to failover servers", func() {
+ Skip("Flaky Test")
client = redis.NewUniversalClient(&redis.UniversalOptions{
MasterName: sentinelName,
Addrs: sentinelAddrs,
@@ -31,7 +32,7 @@ var _ = Describe("UniversalClient", func() {
Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred())
})
- It("should connect to clusters", func() {
+ It("should connect to clusters", Label("NonRedisEnterprise"), func() {
client = redis.NewUniversalClient(&redis.UniversalOptions{
Addrs: cluster.addrs(),
})
diff --git a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/version.go
index 112c9a2..e2c7f3e 100644
--- a/dependencies/pkg/mod/github.com/go-redis/redis/v8@v8.11.5/version.go
+++ b/dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.5.1/version.go
@@ -2,5 +2,5 @@ package redis
// Version is the current release version.
func Version() string {
- return "8.11.5"
+ return "9.5.1"
}